/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

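/* Free the DMA-coherent memory backing a queue's ring, if it was allocated */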
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

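/* Enable or disable host interrupt delivery via the membar control register
 * in PCI config space; does nothing if the requested state is already set.
 */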
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

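/* Doorbell helpers: each writes the queue id and a count into the adapter's
 * doorbell BAR. The Rx/Tx variants issue a wmb() so that ring entries are
 * visible to the device before the doorbell is rung.
 */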
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

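/* Fold a 16-bit HW counter that wraps at 65535 into a monotonically
 * increasing 32-bit driver counter.
 */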
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

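/* Map the skb header and frags, filling one data WRB per mapping (plus an
 * optional dummy WRB to even out the count). Returns the number of bytes
 * mapped, or 0 after unwinding all mappings on a DMA error.
 */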
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

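/* ndo_start_xmit: build the WRBs for this skb, stop the subqueue if it may
 * not have room for another max-fragmented request, then ring the doorbell.
 */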
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

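/* Adaptive interrupt coalescing: once a second, compute the Rx pkts/sec
 * rate and derive a new EQ delay, clamped to [min_eqd, max_eqd].
 */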
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

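/* Return the page_info for the given rx frag index; unmap the underlying
 * page when its last frag is being consumed.
 */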
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

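/* Parse a v1 Rx completion; be_rx_compl_get() uses this variant when the
 * adapter is in be3_native mode, and the v0 variant below otherwise.
 */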
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

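/* Request a compound page (__GFP_COMP) for order > 0 allocations so that
 * rx frags carved from one large page can each take a page reference.
 */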
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

1319 /*
1320  * Allocate a page, split it to fragments of size rx_frag_size and post as
1321  * receive buffers to BE
1322  */
1323 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1324 {
1325         struct be_adapter *adapter = rxo->adapter;
1326         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1327         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1328         struct be_queue_info *rxq = &rxo->q;
1329         struct page *pagep = NULL;
1330         struct be_eth_rx_d *rxd;
1331         u64 page_dmaaddr = 0, frag_dmaaddr;
1332         u32 posted, page_offset = 0;
1333
1334         page_info = &rxo->page_info_tbl[rxq->head];
1335         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336                 if (!pagep) {
1337                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1338                         if (unlikely(!pagep)) {
1339                                 rx_stats(rxo)->rx_post_fail++;
1340                                 break;
1341                         }
1342                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343                                                     0, adapter->big_page_size,
1344                                                     DMA_FROM_DEVICE);
1345                         page_info->page_offset = 0;
1346                 } else {
1347                         get_page(pagep);
1348                         page_info->page_offset = page_offset + rx_frag_size;
1349                 }
1350                 page_offset = page_info->page_offset;
1351                 page_info->page = pagep;
1352                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1353                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1354
1355                 rxd = queue_head_node(rxq);
1356                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1358
1359                 /* Any space left in the current big page for another frag? */
1360                 if ((page_offset + rx_frag_size + rx_frag_size) >
1361                                         adapter->big_page_size) {
1362                         pagep = NULL;
1363                         page_info->last_page_user = true;
1364                 }
1365
1366                 prev_page_info = page_info;
1367                 queue_head_inc(rxq);
1368                 page_info = &page_info_tbl[rxq->head];
1369         }
1370         if (pagep)
1371                 prev_page_info->last_page_user = true;
1372
1373         if (posted) {
1374                 atomic_add(posted, &rxq->used);
1375                 be_rxq_notify(adapter, rxq->id, posted);
1376         } else if (atomic_read(&rxq->used) == 0) {
1377                 /* Let be_worker replenish when memory is available */
1378                 rxo->rx_post_starved = true;
1379         }
1380 }
1381
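/* Return the next valid TX completion on the CQ, or NULL if none is
 * pending. The rmb() keeps the entry from being read before its valid
 * bit; the valid bit is then cleared so the entry is never reaped twice.
 */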
1382 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1383 {
1384         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1385
1386         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387                 return NULL;
1388
1389         rmb();
1390         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1391
1392         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1393
1394         queue_tail_inc(tx_cq);
1395         return txcp;
1396 }
1397
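/* Unmap the wrbs of the skb that ends at last_index and free the skb.
 * Returns the number of wrbs consumed (header wrb included) so that the
 * caller can credit them back to txq->used.
 */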
1398 static u16 be_tx_compl_process(struct be_adapter *adapter,
1399                 struct be_tx_obj *txo, u16 last_index)
1400 {
1401         struct be_queue_info *txq = &txo->q;
1402         struct be_eth_wrb *wrb;
1403         struct sk_buff **sent_skbs = txo->sent_skb_list;
1404         struct sk_buff *sent_skb;
1405         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406         bool unmap_skb_hdr = true;
1407
1408         sent_skb = sent_skbs[txq->tail];
1409         BUG_ON(!sent_skb);
1410         sent_skbs[txq->tail] = NULL;
1411
1412         /* skip header wrb */
1413         queue_tail_inc(txq);
1414
1415         do {
1416                 cur_index = txq->tail;
1417                 wrb = queue_tail_node(txq);
1418                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1420                 unmap_skb_hdr = false;
1421
1422                 num_wrbs++;
1423                 queue_tail_inc(txq);
1424         } while (cur_index != last_index);
1425
1426         kfree_skb(sent_skb);
1427         return num_wrbs;
1428 }
1429
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1431 {
1432         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1433
1434         if (!eqe->evt)
1435                 return NULL;
1436
1437         rmb();
1438         eqe->evt = le32_to_cpu(eqe->evt);
1439         queue_tail_inc(&eq_obj->q);
1440         return eqe;
1441 }
1442
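/* Drain all pending entries off the EQ and, if any were found, schedule
 * NAPI to do the real completion processing. A spurious interrupt (no
 * events) still re-arms the EQ so later events keep raising interrupts.
 */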
1443 static int event_handle(struct be_adapter *adapter,
1444                         struct be_eq_obj *eq_obj,
1445                         bool rearm)
1446 {
1447         struct be_eq_entry *eqe;
1448         u16 num = 0;
1449
1450         while ((eqe = event_get(eq_obj)) != NULL) {
1451                 eqe->evt = 0;
1452                 num++;
1453         }
1454
1455         /* Deal with any spurious interrupts that come
1456          * without events
1457          */
1458         if (!num)
1459                 rearm = true;
1460
1461         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1462         if (num)
1463                 napi_schedule(&eq_obj->napi);
1464
1465         return num;
1466 }
1467
1468 /* Just read and notify events without processing them.
1469  * Used at the time of destroying event queues */
1470 static void be_eq_clean(struct be_adapter *adapter,
1471                         struct be_eq_obj *eq_obj)
1472 {
1473         struct be_eq_entry *eqe;
1474         u16 num = 0;
1475
1476         while ((eqe = event_get(eq_obj)) != NULL) {
1477                 eqe->evt = 0;
1478                 num++;
1479         }
1480
1481         if (num)
1482                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1483 }
1484
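/* Release everything still held by the RX ring: discard any pending
 * completions, then drop the page references of buffers that were posted
 * but never filled by hardware.
 */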
1485 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1486 {
1487         struct be_rx_page_info *page_info;
1488         struct be_queue_info *rxq = &rxo->q;
1489         struct be_queue_info *rx_cq = &rxo->cq;
1490         struct be_rx_compl_info *rxcp;
1491         u16 tail;
1492
1493         /* First, clean up pending rx completions */
1494         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495                 be_rx_compl_discard(adapter, rxo, rxcp);
1496                 be_cq_notify(adapter, rx_cq->id, false, 1);
1497         }
1498
1499         /* Then free posted rx buffers that were not used */
1500         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1501         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1502                 page_info = get_rx_page_info(adapter, rxo, tail);
1503                 put_page(page_info->page);
1504                 memset(page_info, 0, sizeof(*page_info));
1505         }
1506         BUG_ON(atomic_read(&rxq->used));
1507         rxq->tail = rxq->head = 0;
1508 }
1509
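/* Reap TX completions for up to ~200ms; any wrbs whose completions never
 * arrive are then reclaimed by hand so no skbs leak across an ifdown.
 */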
1510 static void be_tx_compl_clean(struct be_adapter *adapter,
1511                                 struct be_tx_obj *txo)
1512 {
1513         struct be_queue_info *tx_cq = &txo->cq;
1514         struct be_queue_info *txq = &txo->q;
1515         struct be_eth_tx_compl *txcp;
1516         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1517         struct sk_buff **sent_skbs = txo->sent_skb_list;
1518         struct sk_buff *sent_skb;
1519         bool dummy_wrb;
1520
1521         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522         do {
1523                 while ((txcp = be_tx_compl_get(tx_cq))) {
1524                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525                                         wrb_index, txcp);
1526                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1527                         cmpl++;
1528                 }
1529                 if (cmpl) {
1530                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1531                         atomic_sub(num_wrbs, &txq->used);
1532                         cmpl = 0;
1533                         num_wrbs = 0;
1534                 }
1535
1536                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537                         break;
1538
1539                 mdelay(1);
1540         } while (true);
1541
1542         if (atomic_read(&txq->used))
1543                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544                         atomic_read(&txq->used));
1545
1546         /* free posted tx for which compls will never arrive */
1547         while (atomic_read(&txq->used)) {
1548                 sent_skb = sent_skbs[txq->tail];
1549                 end_idx = txq->tail;
1550                 index_adv(&end_idx,
1551                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552                         txq->len);
1553                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1554                 atomic_sub(num_wrbs, &txq->used);
1555         }
1556 }
1557
1558 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1559 {
1560         struct be_queue_info *q;
1561
1562         q = &adapter->mcc_obj.q;
1563         if (q->created)
1564                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1565         be_queue_free(adapter, q);
1566
1567         q = &adapter->mcc_obj.cq;
1568         if (q->created)
1569                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570         be_queue_free(adapter, q);
1571 }
1572
1573 /* Must be called only after TX qs are created as MCC shares TX EQ */
1574 static int be_mcc_queues_create(struct be_adapter *adapter)
1575 {
1576         struct be_queue_info *q, *cq;
1577
1578         /* Alloc MCC compl queue */
1579         cq = &adapter->mcc_obj.cq;
1580         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1581                         sizeof(struct be_mcc_compl)))
1582                 goto err;
1583
1584         /* Ask BE to create MCC compl queue; share TX's eq */
1585         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1586                 goto mcc_cq_free;
1587
1588         /* Alloc MCC queue */
1589         q = &adapter->mcc_obj.q;
1590         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591                 goto mcc_cq_destroy;
1592
1593         /* Ask BE to create MCC queue */
1594         if (be_cmd_mccq_create(adapter, q, cq))
1595                 goto mcc_q_free;
1596
1597         return 0;
1598
1599 mcc_q_free:
1600         be_queue_free(adapter, q);
1601 mcc_cq_destroy:
1602         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1603 mcc_cq_free:
1604         be_queue_free(adapter, cq);
1605 err:
1606         return -1;
1607 }
1608
1609 static void be_tx_queues_destroy(struct be_adapter *adapter)
1610 {
1611         struct be_queue_info *q;
1612         struct be_tx_obj *txo;
1613         u8 i;
1614
1615         for_all_tx_queues(adapter, txo, i) {
1616                 q = &txo->q;
1617                 if (q->created)
1618                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619                 be_queue_free(adapter, q);
1620
1621                 q = &txo->cq;
1622                 if (q->created)
1623                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624                 be_queue_free(adapter, q);
1625         }
1626
1627         /* Clear any residual events */
1628         be_eq_clean(adapter, &adapter->tx_eq);
1629
1630         q = &adapter->tx_eq.q;
1631         if (q->created)
1632                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1633         be_queue_free(adapter, q);
1634 }
1635
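/* Multiple TX queues are usable only on a BE3-native PF without SR-IOV;
 * Lancer, VFs, BE2 and functions with mode bit 0x400 set (presumably the
 * FLEX10/multi-channel mode) are limited to a single TX queue.
 */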
1636 static int be_num_txqs_want(struct be_adapter *adapter)
1637 {
1638         if ((num_vfs && adapter->sriov_enabled) ||
1639                 (adapter->function_mode & 0x400) ||
1640                 lancer_chip(adapter) || !be_physfn(adapter) ||
1641                 adapter->generation == BE_GEN2)
1642                 return 1;
1643         else
1644                 return MAX_TX_QS;
1645 }
1646
1647 /* One TX event queue is shared by all TX compl qs */
1648 static int be_tx_queues_create(struct be_adapter *adapter)
1649 {
1650         struct be_queue_info *eq, *q, *cq;
1651         struct be_tx_obj *txo;
1652         u8 i;
1653
1654         adapter->num_tx_qs = be_num_txqs_want(adapter);
1655         if (adapter->num_tx_qs != MAX_TX_QS)
1656                 netif_set_real_num_tx_queues(adapter->netdev,
1657                         adapter->num_tx_qs);
1658
1659         adapter->tx_eq.max_eqd = 0;
1660         adapter->tx_eq.min_eqd = 0;
1661         adapter->tx_eq.cur_eqd = 96;
1662         adapter->tx_eq.enable_aic = false;
1663
1664         eq = &adapter->tx_eq.q;
1665         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1666                 sizeof(struct be_eq_entry)))
1667                 return -1;
1668
1669         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1670                 goto err;
1671         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1672
1673         for_all_tx_queues(adapter, txo, i) {
1674                 cq = &txo->cq;
1675                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1676                         sizeof(struct be_eth_tx_compl)))
1677                         goto err;
1678
1679                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1680                         goto err;
1681
1682                 q = &txo->q;
1683                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1684                         sizeof(struct be_eth_wrb)))
1685                         goto err;
1686
1687                 if (be_cmd_txq_create(adapter, q, cq))
1688                         goto err;
1689         }
1690         return 0;
1691
1692 err:
1693         be_tx_queues_destroy(adapter);
1694         return -1;
1695 }
1696
1697 static void be_rx_queues_destroy(struct be_adapter *adapter)
1698 {
1699         struct be_queue_info *q;
1700         struct be_rx_obj *rxo;
1701         int i;
1702
1703         for_all_rx_queues(adapter, rxo, i) {
1704                 be_queue_free(adapter, &rxo->q);
1705
1706                 q = &rxo->cq;
1707                 if (q->created)
1708                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1709                 be_queue_free(adapter, q);
1710
1711                 q = &rxo->rx_eq.q;
1712                 if (q->created)
1713                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1714                 be_queue_free(adapter, q);
1715         }
1716 }
1717
1718 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1719 {
1720         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1721                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1722                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1723         } else {
1724                 dev_warn(&adapter->pdev->dev,
1725                         "No support for multiple RX queues\n");
1726                 return 1;
1727         }
1728 }
1729
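/* Create an EQ and CQ per RX queue and allocate (but do not create) the
 * RX rings themselves; the rings are created later, in be_open().
 */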
1730 static int be_rx_queues_create(struct be_adapter *adapter)
1731 {
1732         struct be_queue_info *eq, *q, *cq;
1733         struct be_rx_obj *rxo;
1734         int rc, i;
1735
1736         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1737                                 msix_enabled(adapter) ?
1738                                         adapter->num_msix_vec - 1 : 1);
1739         if (adapter->num_rx_qs != MAX_RX_QS)
1740                 dev_warn(&adapter->pdev->dev,
1741                         "Can create only %d RX queues", adapter->num_rx_qs);
1742
1743         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1744         for_all_rx_queues(adapter, rxo, i) {
1745                 rxo->adapter = adapter;
1746                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1747                 rxo->rx_eq.enable_aic = true;
1748
1749                 /* EQ */
1750                 eq = &rxo->rx_eq.q;
1751                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1752                                         sizeof(struct be_eq_entry));
1753                 if (rc)
1754                         goto err;
1755
1756                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1757                 if (rc)
1758                         goto err;
1759
1760                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1761
1762                 /* CQ */
1763                 cq = &rxo->cq;
1764                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1765                                 sizeof(struct be_eth_rx_compl));
1766                 if (rc)
1767                         goto err;
1768
1769                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1770                 if (rc)
1771                         goto err;
1772
1773                 /* Rx Q - will be created in be_open() */
1774                 q = &rxo->q;
1775                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1776                                 sizeof(struct be_eth_rx_d));
1777                 if (rc)
1778                         goto err;
1779
1780         }
1781
1782         return 0;
1783 err:
1784         be_rx_queues_destroy(adapter);
1785         return -1;
1786 }
1787
1788 static bool event_peek(struct be_eq_obj *eq_obj)
1789 {
1790         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1791         if (!eqe->evt)
1792                 return false;
1793         else
1794                 return true;
1795 }
1796
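/* Legacy INTx handler. Lancer provides no CEV_ISR read here, so the EQs
 * are peeked directly for pending entries; on BE the ISR register tells
 * us which EQs fired. Returns IRQ_NONE if the interrupt was not ours.
 */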
1797 static irqreturn_t be_intx(int irq, void *dev)
1798 {
1799         struct be_adapter *adapter = dev;
1800         struct be_rx_obj *rxo;
1801         int isr, i, tx = 0, rx = 0;
1802
1803         if (lancer_chip(adapter)) {
1804                 if (event_peek(&adapter->tx_eq))
1805                         tx = event_handle(adapter, &adapter->tx_eq, false);
1806                 for_all_rx_queues(adapter, rxo, i) {
1807                         if (event_peek(&rxo->rx_eq))
1808                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1809                 }
1810
1811                 if (!(tx || rx))
1812                         return IRQ_NONE;
1813
1814         } else {
1815                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1816                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1817                 if (!isr)
1818                         return IRQ_NONE;
1819
1820                 if ((1 << adapter->tx_eq.eq_idx & isr))
1821                         event_handle(adapter, &adapter->tx_eq, false);
1822
1823                 for_all_rx_queues(adapter, rxo, i) {
1824                         if ((1 << rxo->rx_eq.eq_idx & isr))
1825                                 event_handle(adapter, &rxo->rx_eq, true);
1826                 }
1827         }
1828
1829         return IRQ_HANDLED;
1830 }
1831
1832 static irqreturn_t be_msix_rx(int irq, void *dev)
1833 {
1834         struct be_rx_obj *rxo = dev;
1835         struct be_adapter *adapter = rxo->adapter;
1836
1837         event_handle(adapter, &rxo->rx_eq, true);
1838
1839         return IRQ_HANDLED;
1840 }
1841
1842 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1843 {
1844         struct be_adapter *adapter = dev;
1845
1846         event_handle(adapter, &adapter->tx_eq, false);
1847
1848         return IRQ_HANDLED;
1849 }
1850
1851 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1852 {
1853         return rxcp->tcpf && !rxcp->err;
1854 }
1855
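/* NAPI poll for one RX queue: reap up to 'budget' completions (dropping
 * flush, partial-DMA and wrong-port completions), refill the ring once
 * it drains below the watermark, and re-arm the CQ when all is consumed.
 */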
1856 static int be_poll_rx(struct napi_struct *napi, int budget)
1857 {
1858         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1859         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1860         struct be_adapter *adapter = rxo->adapter;
1861         struct be_queue_info *rx_cq = &rxo->cq;
1862         struct be_rx_compl_info *rxcp;
1863         u32 work_done;
1864
1865         rx_stats(rxo)->rx_polls++;
1866         for (work_done = 0; work_done < budget; work_done++) {
1867                 rxcp = be_rx_compl_get(rxo);
1868                 if (!rxcp)
1869                         break;
1870
1871                 /* Is it a flush compl that has no data */
1872                 if (unlikely(rxcp->num_rcvd == 0))
1873                         goto loop_continue;
1874
1875                 /* Discard compls with partial DMA on Lancer B0 */
1876                 if (unlikely(!rxcp->pkt_size)) {
1877                         be_rx_compl_discard(adapter, rxo, rxcp);
1878                         goto loop_continue;
1879                 }
1880
1881                 /* On BE, drop pkts that arrive due to imperfect filtering in
1882                  * promiscuous mode on some SKUs
1883                  */
1884                 if (unlikely(rxcp->port != adapter->port_num &&
1885                                 !lancer_chip(adapter))) {
1886                         be_rx_compl_discard(adapter, rxo, rxcp);
1887                         goto loop_continue;
1888                 }
1889
1890                 if (do_gro(rxcp))
1891                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1892                 else
1893                         be_rx_compl_process(adapter, rxo, rxcp);
1894 loop_continue:
1895                 be_rx_stats_update(rxo, rxcp);
1896         }
1897
1898         /* Refill the queue */
1899         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1900                 be_post_rx_frags(rxo, GFP_ATOMIC);
1901
1902         /* All consumed */
1903         if (work_done < budget) {
1904                 napi_complete(napi);
1905                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1906         } else {
1907                 /* More to be consumed; continue with interrupts disabled */
1908                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1909         }
1910         return work_done;
1911 }
1912
1913 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1914  * For TX/MCC we don't honour the budget; consume everything.
1915  */
1916 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1917 {
1918         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1919         struct be_adapter *adapter =
1920                 container_of(tx_eq, struct be_adapter, tx_eq);
1921         struct be_tx_obj *txo;
1922         struct be_eth_tx_compl *txcp;
1923         int tx_compl, mcc_compl, status = 0;
1924         u8 i;
1925         u16 num_wrbs;
1926
1927         for_all_tx_queues(adapter, txo, i) {
1928                 tx_compl = 0;
1929                 num_wrbs = 0;
1930                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1931                         num_wrbs += be_tx_compl_process(adapter, txo,
1932                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1933                                         wrb_index, txcp));
1934                         tx_compl++;
1935                 }
1936                 if (tx_compl) {
1937                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1938
1939                         atomic_sub(num_wrbs, &txo->q.used);
1940
1941                         /* As Tx wrbs have been freed up, wake up netdev queue
1942                          * if it was stopped due to lack of tx wrbs.  */
1943                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1944                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1945                                 netif_wake_subqueue(adapter->netdev, i);
1946                         }
1947
1948                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1949                         tx_stats(txo)->tx_compl += tx_compl;
1950                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1951                 }
1952         }
1953
1954         mcc_compl = be_process_mcc(adapter, &status);
1955
1956         if (mcc_compl) {
1957                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1958                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1959         }
1960
1961         napi_complete(napi);
1962
1963         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1964         adapter->drv_stats.tx_events++;
1965         return 1;
1966 }
1967
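/* Read the UE (unrecoverable error) status registers over PCI config
 * space, apply the corresponding mask registers, and name every error
 * bit that is asserted. A set bit marks the adapter as failed (eeh_err).
 */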
1968 void be_detect_dump_ue(struct be_adapter *adapter)
1969 {
1970         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1971         u32 i;
1972
1973         pci_read_config_dword(adapter->pdev,
1974                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1975         pci_read_config_dword(adapter->pdev,
1976                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1977         pci_read_config_dword(adapter->pdev,
1978                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1979         pci_read_config_dword(adapter->pdev,
1980                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1981
1982         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1983         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1984
1985         if (ue_status_lo || ue_status_hi) {
1986                 adapter->ue_detected = true;
1987                 adapter->eeh_err = true;
1988                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1989         }
1990
1991         if (ue_status_lo) {
1992                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1993                         if (ue_status_lo & 1)
1994                                 dev_err(&adapter->pdev->dev,
1995                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1996                 }
1997         }
1998         if (ue_status_hi) {
1999                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2000                         if (ue_status_hi & 1)
2001                                 dev_err(&adapter->pdev->dev,
2002                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2003                 }
2004         }
2005
2006 }
2007
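/* Once-a-second housekeeping: UE detection, (re)issuing the async stats
 * command, adapting RX EQ delay, and replenishing RX rings that starved
 * under memory pressure. Before the netdev is up, only MCC completions
 * are reaped here since interrupts may not be enabled yet.
 */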
2008 static void be_worker(struct work_struct *work)
2009 {
2010         struct be_adapter *adapter =
2011                 container_of(work, struct be_adapter, work.work);
2012         struct be_rx_obj *rxo;
2013         int i;
2014
2015         if (!adapter->ue_detected && !lancer_chip(adapter))
2016                 be_detect_dump_ue(adapter);
2017
2018         /* When interrupts are not yet enabled, just reap any pending
2019          * mcc completions */
2020         if (!netif_running(adapter->netdev)) {
2021                 int mcc_compl, status = 0;
2022
2023                 mcc_compl = be_process_mcc(adapter, &status);
2024
2025                 if (mcc_compl) {
2026                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2027                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2028                 }
2029
2030                 goto reschedule;
2031         }
2032
2033         if (!adapter->stats_cmd_sent) {
2034                 if (lancer_chip(adapter))
2035                         lancer_cmd_get_pport_stats(adapter,
2036                                                 &adapter->stats_cmd);
2037                 else
2038                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2039         }
2040
2041         for_all_rx_queues(adapter, rxo, i) {
2042                 be_rx_eqd_update(adapter, rxo);
2043
2044                 if (rxo->rx_post_starved) {
2045                         rxo->rx_post_starved = false;
2046                         be_post_rx_frags(rxo, GFP_KERNEL);
2047                 }
2048         }
2049
2050 reschedule:
2051         adapter->work_counter++;
2052         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2053 }
2054
2055 static void be_msix_disable(struct be_adapter *adapter)
2056 {
2057         if (msix_enabled(adapter)) {
2058                 pci_disable_msix(adapter->pdev);
2059                 adapter->num_msix_vec = 0;
2060         }
2061 }
2062
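/* Ask for one vector per wanted RX queue plus one shared TX/MCC vector.
 * With the old pci_enable_msix() semantics a positive return value is
 * the number of vectors actually available, so retry once with that
 * reduced count.
 */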
2063 static void be_msix_enable(struct be_adapter *adapter)
2064 {
2065 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2066         int i, status, num_vec;
2067
2068         num_vec = be_num_rxqs_want(adapter) + 1;
2069
2070         for (i = 0; i < num_vec; i++)
2071                 adapter->msix_entries[i].entry = i;
2072
2073         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2074         if (status == 0) {
2075                 goto done;
2076         } else if (status >= BE_MIN_MSIX_VECTORS) {
2077                 num_vec = status;
2078                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2079                                 num_vec) == 0)
2080                         goto done;
2081         }
2082         return;
2083 done:
2084         adapter->num_msix_vec = num_vec;
2085         return;
2086 }
2087
2088 static int be_sriov_enable(struct be_adapter *adapter)
2089 {
2090         be_check_sriov_fn_type(adapter);
2091 #ifdef CONFIG_PCI_IOV
2092         if (be_physfn(adapter) && num_vfs) {
2093                 int status, pos;
2094                 u16 nvfs;
2095
2096                 pos = pci_find_ext_capability(adapter->pdev,
2097                                                 PCI_EXT_CAP_ID_SRIOV);
2098                 pci_read_config_word(adapter->pdev,
2099                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2100
2101                 if (num_vfs > nvfs) {
2102                         dev_info(&adapter->pdev->dev,
2103                                         "Device supports %d VFs and not %d\n",
2104                                         nvfs, num_vfs);
2105                         num_vfs = nvfs;
2106                 }
2107
2108                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2109                 adapter->sriov_enabled = !status;
2110
2111                 if (adapter->sriov_enabled) {
2112                         adapter->vf_cfg = kcalloc(num_vfs,
2113                                                 sizeof(struct be_vf_cfg),
2114                                                 GFP_KERNEL);
2115                         if (!adapter->vf_cfg)
2116                                 return -ENOMEM;
2117                 }
2118         }
2119 #endif
2120         return 0;
2121 }
2122
2123 static void be_sriov_disable(struct be_adapter *adapter)
2124 {
2125 #ifdef CONFIG_PCI_IOV
2126         if (adapter->sriov_enabled) {
2127                 pci_disable_sriov(adapter->pdev);
2128                 kfree(adapter->vf_cfg);
2129                 adapter->sriov_enabled = false;
2130         }
2131 #endif
2132 }
2133
2134 static inline int be_msix_vec_get(struct be_adapter *adapter,
2135                                         struct be_eq_obj *eq_obj)
2136 {
2137         return adapter->msix_entries[eq_obj->eq_idx].vector;
2138 }
2139
2140 static int be_request_irq(struct be_adapter *adapter,
2141                 struct be_eq_obj *eq_obj,
2142                 void *handler, char *desc, void *context)
2143 {
2144         struct net_device *netdev = adapter->netdev;
2145         int vec;
2146
2147         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2148         vec = be_msix_vec_get(adapter, eq_obj);
2149         return request_irq(vec, handler, 0, eq_obj->desc, context);
2150 }
2151
2152 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2153                         void *context)
2154 {
2155         int vec = be_msix_vec_get(adapter, eq_obj);
2156         free_irq(vec, context);
2157 }
2158
2159 static int be_msix_register(struct be_adapter *adapter)
2160 {
2161         struct be_rx_obj *rxo;
2162         int status, i;
2163         char qname[10];
2164
2165         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2166                                 adapter);
2167         if (status)
2168                 goto err;
2169
2170         for_all_rx_queues(adapter, rxo, i) {
2171                 sprintf(qname, "rxq%d", i);
2172                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2173                                 qname, rxo);
2174                 if (status)
2175                         goto err_msix;
2176         }
2177
2178         return 0;
2179
2180 err_msix:
2181         be_free_irq(adapter, &adapter->tx_eq, adapter);
2182
2183         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2184                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2185
2186 err:
2187         dev_warn(&adapter->pdev->dev,
2188                 "MSIX Request IRQ failed - err %d\n", status);
2189         be_msix_disable(adapter);
2190         return status;
2191 }
2192
2193 static int be_irq_register(struct be_adapter *adapter)
2194 {
2195         struct net_device *netdev = adapter->netdev;
2196         int status;
2197
2198         if (msix_enabled(adapter)) {
2199                 status = be_msix_register(adapter);
2200                 if (status == 0)
2201                         goto done;
2202                 /* INTx is not supported for VFs */
2203                 if (!be_physfn(adapter))
2204                         return status;
2205         }
2206
2207         /* INTx */
2208         netdev->irq = adapter->pdev->irq;
2209         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2210                         adapter);
2211         if (status) {
2212                 dev_err(&adapter->pdev->dev,
2213                         "INTx request IRQ failed - err %d\n", status);
2214                 return status;
2215         }
2216 done:
2217         adapter->isr_registered = true;
2218         return 0;
2219 }
2220
2221 static void be_irq_unregister(struct be_adapter *adapter)
2222 {
2223         struct net_device *netdev = adapter->netdev;
2224         struct be_rx_obj *rxo;
2225         int i;
2226
2227         if (!adapter->isr_registered)
2228                 return;
2229
2230         /* INTx */
2231         if (!msix_enabled(adapter)) {
2232                 free_irq(netdev->irq, adapter);
2233                 goto done;
2234         }
2235
2236         /* MSIx */
2237         be_free_irq(adapter, &adapter->tx_eq, adapter);
2238
2239         for_all_rx_queues(adapter, rxo, i)
2240                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2241
2242 done:
2243         adapter->isr_registered = false;
2244 }
2245
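/* Destroy the RX rings and flush out whatever completions and events
 * they left behind; called on the ifdown path after NAPI is disabled.
 */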
2246 static void be_rx_queues_clear(struct be_adapter *adapter)
2247 {
2248         struct be_queue_info *q;
2249         struct be_rx_obj *rxo;
2250         int i;
2251
2252         for_all_rx_queues(adapter, rxo, i) {
2253                 q = &rxo->q;
2254                 if (q->created) {
2255                         be_cmd_rxq_destroy(adapter, q);
2256                         /* After the rxq is invalidated, wait for a grace time
2257                          * of 1ms for all dma to end and the flush compl to
2258                          * arrive
2259                          */
2260                         mdelay(1);
2261                         be_rx_q_clean(adapter, rxo);
2262                 }
2263
2264                 /* Clear any residual events */
2265                 q = &rxo->rx_eq.q;
2266                 if (q->created)
2267                         be_eq_clean(adapter, &rxo->rx_eq);
2268         }
2269 }
2270
2271 static int be_close(struct net_device *netdev)
2272 {
2273         struct be_adapter *adapter = netdev_priv(netdev);
2274         struct be_rx_obj *rxo;
2275         struct be_tx_obj *txo;
2276         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2277         int vec, i;
2278
2279         be_async_mcc_disable(adapter);
2280
2281         if (!lancer_chip(adapter))
2282                 be_intr_set(adapter, false);
2283
2284         for_all_rx_queues(adapter, rxo, i)
2285                 napi_disable(&rxo->rx_eq.napi);
2286
2287         napi_disable(&tx_eq->napi);
2288
2289         if (lancer_chip(adapter)) {
2290                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2291                 for_all_rx_queues(adapter, rxo, i)
2292                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2293                 for_all_tx_queues(adapter, txo, i)
2294                          be_cq_notify(adapter, txo->cq.id, false, 0);
2295         }
2296
2297         if (msix_enabled(adapter)) {
2298                 vec = be_msix_vec_get(adapter, tx_eq);
2299                 synchronize_irq(vec);
2300
2301                 for_all_rx_queues(adapter, rxo, i) {
2302                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2303                         synchronize_irq(vec);
2304                 }
2305         } else {
2306                 synchronize_irq(netdev->irq);
2307         }
2308         be_irq_unregister(adapter);
2309
2310         /* Wait for all pending tx completions to arrive so that
2311          * all tx skbs are freed.
2312          */
2313         for_all_tx_queues(adapter, txo, i)
2314                 be_tx_compl_clean(adapter, txo);
2315
2316         be_rx_queues_clear(adapter);
2317         return 0;
2318 }
2319
2320 static int be_rx_queues_setup(struct be_adapter *adapter)
2321 {
2322         struct be_rx_obj *rxo;
2323         int rc, i;
2324         u8 rsstable[MAX_RSS_QS];
2325
2326         for_all_rx_queues(adapter, rxo, i) {
2327                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2328                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2329                         adapter->if_handle,
2330                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2331                 if (rc)
2332                         return rc;
2333         }
2334
2335         if (be_multi_rxq(adapter)) {
2336                 for_all_rss_queues(adapter, rxo, i)
2337                         rsstable[i] = rxo->rss_id;
2338
2339                 rc = be_cmd_rss_config(adapter, rsstable,
2340                         adapter->num_rx_qs - 1);
2341                 if (rc)
2342                         return rc;
2343         }
2344
2345         /* First time posting */
2346         for_all_rx_queues(adapter, rxo, i) {
2347                 be_post_rx_frags(rxo, GFP_KERNEL);
2348                 napi_enable(&rxo->rx_eq.napi);
2349         }
2350         return 0;
2351 }
2352
2353 static int be_open(struct net_device *netdev)
2354 {
2355         struct be_adapter *adapter = netdev_priv(netdev);
2356         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2357         struct be_rx_obj *rxo;
2358         int status, i;
2359
2360         status = be_rx_queues_setup(adapter);
2361         if (status)
2362                 goto err;
2363
2364         napi_enable(&tx_eq->napi);
2365
2366         be_irq_register(adapter);
2367
2368         if (!lancer_chip(adapter))
2369                 be_intr_set(adapter, true);
2370
2371         /* The evt queues are created in unarmed state; arm them */
2372         for_all_rx_queues(adapter, rxo, i) {
2373                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2374                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2375         }
2376         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2377
2378         /* Now that interrupts are on we can process async mcc */
2379         be_async_mcc_enable(adapter);
2380
2381         return 0;
2382 err:
2383         be_close(adapter->netdev);
2384         return -EIO;
2385 }
2386
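/* Program (or clear) the magic-packet WoL filter in firmware and set the
 * matching D3hot/D3cold PCI wake flags; disabling passes an all-zero MAC.
 */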
2387 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2388 {
2389         struct be_dma_mem cmd;
2390         int status = 0;
2391         u8 mac[ETH_ALEN];
2392
2393         memset(mac, 0, ETH_ALEN);
2394
2395         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2396         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2397                                     GFP_KERNEL);
2398         if (cmd.va == NULL)
2399                 return -1;
2400         memset(cmd.va, 0, cmd.size);
2401
2402         if (enable) {
2403                 status = pci_write_config_dword(adapter->pdev,
2404                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2405                 if (status) {
2406                         dev_err(&adapter->pdev->dev,
2407                                 "Could not enable Wake-on-lan\n");
2408                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2409                                           cmd.dma);
2410                         return status;
2411                 }
2412                 status = be_cmd_enable_magic_wol(adapter,
2413                                 adapter->netdev->dev_addr, &cmd);
2414                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2415                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2416         } else {
2417                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2418                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2419                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2420         }
2421
2422         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2423         return status;
2424 }
2425
2426 /*
2427  * Generate a seed MAC address from the PF MAC address using jhash.
2428  * MAC addresses for VFs are assigned incrementally starting from the seed.
2429  * These addresses are programmed in the ASIC by the PF and the VF driver
2430  * queries for the MAC address during its probe.
2431  */
2432 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2433 {
2434         u32 vf;
2435         int status = 0;
2436         u8 mac[ETH_ALEN];
2437
2438         be_vf_eth_addr_generate(adapter, mac);
2439
2440         for (vf = 0; vf < num_vfs; vf++) {
2441                 status = be_cmd_pmac_add(adapter, mac,
2442                                         adapter->vf_cfg[vf].vf_if_handle,
2443                                         &adapter->vf_cfg[vf].vf_pmac_id,
2444                                         vf + 1);
2445                 if (status)
2446                         dev_err(&adapter->pdev->dev,
2447                                 "Mac address add failed for VF %d\n", vf);
2448                 else
2449                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2450
2451                 mac[5] += 1;
2452         }
2453         return status;
2454 }
2455
2456 static void be_vf_clear(struct be_adapter *adapter)
2457 {
2458         u32 vf;
2459
2460         for (vf = 0; vf < num_vfs; vf++) {
2461                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2462                         be_cmd_pmac_del(adapter,
2463                                         adapter->vf_cfg[vf].vf_if_handle,
2464                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2465         }
2466
2467         for (vf = 0; vf < num_vfs; vf++)
2468                 if (adapter->vf_cfg[vf].vf_if_handle)
2469                         be_cmd_if_destroy(adapter,
2470                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2471 }
2472
2473 static int be_clear(struct be_adapter *adapter)
2474 {
2475         if (be_physfn(adapter) && adapter->sriov_enabled)
2476                 be_vf_clear(adapter);
2477
2478         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2479
2480         be_mcc_queues_destroy(adapter);
2481         be_rx_queues_destroy(adapter);
2482         be_tx_queues_destroy(adapter);
2483         adapter->eq_next_idx = 0;
2484
2485         adapter->be3_native = false;
2486         adapter->promiscuous = false;
2487
2488         /* tell fw we're done with firing cmds */
2489         be_cmd_fw_clean(adapter);
2490         return 0;
2491 }
2492
2493 static int be_vf_setup(struct be_adapter *adapter)
2494 {
2495         u32 cap_flags, en_flags, vf;
2496         u16 lnk_speed;
2497         int status;
2498
2499         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2500         for (vf = 0; vf < num_vfs; vf++) {
2501                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2502                                         &adapter->vf_cfg[vf].vf_if_handle,
2503                                         NULL, vf+1);
2504                 if (status)
2505                         goto err;
2506                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2507         }
2508
2509         if (!lancer_chip(adapter)) {
2510                 status = be_vf_eth_addr_config(adapter);
2511                 if (status)
2512                         goto err;
2513         }
2514
2515         for (vf = 0; vf < num_vfs; vf++) {
2516                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2517                                 vf + 1);
2518                 if (status)
2519                         goto err;
2520                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2521         }
2522         return 0;
2523 err:
2524         return status;
2525 }
2526
2527 static int be_setup(struct be_adapter *adapter)
2528 {
2529         struct net_device *netdev = adapter->netdev;
2530         u32 cap_flags, en_flags;
2531         u32 tx_fc, rx_fc;
2532         int status;
2533         u8 mac[ETH_ALEN];
2534
2535         /* Allow all priorities by default. A GRP5 evt may modify this */
2536         adapter->vlan_prio_bmap = 0xff;
2537         adapter->link_speed = -1;
2538
2539         be_cmd_req_native_mode(adapter);
2540
2541         status = be_tx_queues_create(adapter);
2542         if (status != 0)
2543                 goto err;
2544
2545         status = be_rx_queues_create(adapter);
2546         if (status != 0)
2547                 goto err;
2548
2549         status = be_mcc_queues_create(adapter);
2550         if (status != 0)
2551                 goto err;
2552
2553         memset(mac, 0, ETH_ALEN);
2554         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2555                         true /*permanent */, 0);
2556         if (status)
2557                 return status;
2558         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2559         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2560
2561         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2562                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2563         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2564                         BE_IF_FLAGS_PROMISCUOUS;
2565         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2566                 cap_flags |= BE_IF_FLAGS_RSS;
2567                 en_flags |= BE_IF_FLAGS_RSS;
2568         }
2569         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2570                         netdev->dev_addr, &adapter->if_handle,
2571                         &adapter->pmac_id, 0);
2572         if (status != 0)
2573                 goto err;
2574
2575         /* For BEx, the VF's permanent mac queried from the card is incorrect.
2576          * Query the mac configured by the PF using the if_handle
2577          */
2578         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2579                 status = be_cmd_mac_addr_query(adapter, mac,
2580                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2581                 if (!status) {
2582                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2583                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2584                 }
2585         }
2586
2587         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2588
2589         status = be_vid_config(adapter, false, 0);
2590         if (status)
2591                 goto err;
2592
2593         be_set_rx_mode(adapter->netdev);
2594
2595         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2596         if (status)
2597                 goto err;
2598         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2599                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2600                                         adapter->rx_fc);
2601                 if (status)
2602                         goto err;
2603         }
2604
2605         pcie_set_readrq(adapter->pdev, 4096);
2606
2607         if (be_physfn(adapter) && adapter->sriov_enabled) {
2608                 status = be_vf_setup(adapter);
2609                 if (status)
2610                         goto err;
2611         }
2612
2613         return 0;
2614 err:
2615         be_clear(adapter);
2616         return status;
2617 }
2618
2619 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2620 static bool be_flash_redboot(struct be_adapter *adapter,
2621                         const u8 *p, u32 img_start, int image_size,
2622                         int hdr_size)
2623 {
2624         u32 crc_offset;
2625         u8 flashed_crc[4];
2626         int status;
2627
2628         crc_offset = hdr_size + img_start + image_size - 4;
2629
2630         p += crc_offset;
2631
2632         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2633                         (image_size - 4));
2634         if (status) {
2635                 dev_err(&adapter->pdev->dev,
2636                 "could not get crc from flash, not flashing redboot\n");
2637                 return false;
2638         }
2639
2640         /* update redboot only if crc does not match */
2641         if (!memcmp(flashed_crc, p, 4))
2642                 return false;
2643         else
2644                 return true;
2645 }
2646
2647 static bool phy_flashing_required(struct be_adapter *adapter)
2648 {
2649         int status = 0;
2650         struct be_phy_info phy_info;
2651
2652         status = be_cmd_get_phy_info(adapter, &phy_info);
2653         if (status)
2654                 return false;
2655         if ((phy_info.phy_type == TN_8022) &&
2656                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2657                 return true;
2658         }
2659         return false;
2660 }
2661
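/* Walk the generation-specific flash layout table and program every
 * component present in the UFI image, 32KB at a time: intermediate
 * chunks use a SAVE op, and the final chunk a FLASH op to commit.
 */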
2662 static int be_flash_data(struct be_adapter *adapter,
2663                         const struct firmware *fw,
2664                         struct be_dma_mem *flash_cmd, int num_of_images)
2666 {
2667         int status = 0, i, filehdr_size = 0;
2668         u32 total_bytes = 0, flash_op;
2669         int num_bytes;
2670         const u8 *p = fw->data;
2671         struct be_cmd_write_flashrom *req = flash_cmd->va;
2672         const struct flash_comp *pflashcomp;
2673         int num_comp;
2674
2675         static const struct flash_comp gen3_flash_types[10] = {
2676                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2677                         FLASH_IMAGE_MAX_SIZE_g3},
2678                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2679                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2680                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2681                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2682                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2683                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2684                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2685                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2686                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2687                         FLASH_IMAGE_MAX_SIZE_g3},
2688                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2689                         FLASH_IMAGE_MAX_SIZE_g3},
2690                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2691                         FLASH_IMAGE_MAX_SIZE_g3},
2692                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2693                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2694                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2695                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2696         };
2697         static const struct flash_comp gen2_flash_types[8] = {
2698                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2699                         FLASH_IMAGE_MAX_SIZE_g2},
2700                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2701                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2702                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2703                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2704                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2705                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2706                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2707                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2708                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2709                         FLASH_IMAGE_MAX_SIZE_g2},
2710                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2711                         FLASH_IMAGE_MAX_SIZE_g2},
2712                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2713                          FLASH_IMAGE_MAX_SIZE_g2}
2714         };
2715
2716         if (adapter->generation == BE_GEN3) {
2717                 pflashcomp = gen3_flash_types;
2718                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2719                 num_comp = ARRAY_SIZE(gen3_flash_types);
2720         } else {
2721                 pflashcomp = gen2_flash_types;
2722                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2723                 num_comp = ARRAY_SIZE(gen2_flash_types);
2724         }
2725         for (i = 0; i < num_comp; i++) {
2726                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2727                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2728                         continue;
2729                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2730                         if (!phy_flashing_required(adapter))
2731                                 continue;
2732                 }
2733                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2734                         (!be_flash_redboot(adapter, fw->data,
2735                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2736                         (num_of_images * sizeof(struct image_hdr)))))
2737                         continue;
2738                 p = fw->data;
2739                 p += filehdr_size + pflashcomp[i].offset
2740                         + (num_of_images * sizeof(struct image_hdr));
2741                 if (p + pflashcomp[i].size > fw->data + fw->size)
2742                         return -1;
2743                 total_bytes = pflashcomp[i].size;
2744                 while (total_bytes) {
2745                         if (total_bytes > 32*1024)
2746                                 num_bytes = 32*1024;
2747                         else
2748                                 num_bytes = total_bytes;
2749                         total_bytes -= num_bytes;
2750                         if (!total_bytes) {
2751                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2752                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2753                                 else
2754                                         flash_op = FLASHROM_OPER_FLASH;
2755                         } else {
2756                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2757                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2758                                 else
2759                                         flash_op = FLASHROM_OPER_SAVE;
2760                         }
2761                         memcpy(req->params.data_buf, p, num_bytes);
2762                         p += num_bytes;
2763                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2764                                 pflashcomp[i].optype, flash_op, num_bytes);
2765                         if (status) {
2766                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2767                                         (pflashcomp[i].optype ==
2768                                                 IMG_TYPE_PHY_FW))
2769                                         break;
2770                                 dev_err(&adapter->pdev->dev,
2771                                         "cmd to write to flash rom failed.\n");
2772                                 return -1;
2773                         }
2774                 }
2775         }
2776         return 0;
2777 }
2778
2779 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2780 {
2781         if (fhdr == NULL)
2782                 return 0;
2783         if (fhdr->build[0] == '3')
2784                 return BE_GEN3;
2785         else if (fhdr->build[0] == '2')
2786                 return BE_GEN2;
2787         else
2788                 return 0;
2789 }
2790
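/* Lancer firmware is written as a file object: stream the image to
 * LANCER_FW_DOWNLOAD_LOCATION in 32KB write_object chunks, then issue a
 * zero-length write to the final offset to commit the download.
 */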
2791 static int lancer_fw_download(struct be_adapter *adapter,
2792                                 const struct firmware *fw)
2793 {
2794 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2795 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2796         struct be_dma_mem flash_cmd;
2797         const u8 *data_ptr = NULL;
2798         u8 *dest_image_ptr = NULL;
2799         size_t image_size = 0;
2800         u32 chunk_size = 0;
2801         u32 data_written = 0;
2802         u32 offset = 0;
2803         int status = 0;
2804         u8 add_status = 0;
2805
2806         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2807                 dev_err(&adapter->pdev->dev,
2808                         "FW Image not properly aligned. "
2809                         "Length must be 4 byte aligned.\n");
2810                 status = -EINVAL;
2811                 goto lancer_fw_exit;
2812         }
2813
2814         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2815                                 + LANCER_FW_DOWNLOAD_CHUNK;
2816         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2817                                                 &flash_cmd.dma, GFP_KERNEL);
2818         if (!flash_cmd.va) {
2819                 status = -ENOMEM;
2820                 dev_err(&adapter->pdev->dev,
2821                         "Memory allocation failure while flashing\n");
2822                 goto lancer_fw_exit;
2823         }
2824
2825         dest_image_ptr = flash_cmd.va +
2826                                 sizeof(struct lancer_cmd_req_write_object);
2827         image_size = fw->size;
2828         data_ptr = fw->data;
2829
2830         while (image_size) {
2831                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2832
2833                 /* Copy the image chunk content. */
2834                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2835
2836                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2837                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2838                                 &data_written, &add_status);
2839
2840                 if (status)
2841                         break;
2842
2843                 offset += data_written;
2844                 data_ptr += data_written;
2845                 image_size -= data_written;
2846         }
2847
2848         if (!status) {
2849                 /* Commit the firmware image written so far */
2850                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2851                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2852                                         &data_written, &add_status);
2853         }
2854
2855         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2856                                 flash_cmd.dma);
2857         if (status) {
2858                 dev_err(&adapter->pdev->dev,
2859                         "Firmware load error. "
2860                         "Status code: 0x%x Additional Status: 0x%x\n",
2861                         status, add_status);
2862                 goto lancer_fw_exit;
2863         }
2864
2865         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2866 lancer_fw_exit:
2867         return status;
2868 }
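/*
 * lancer_fw_download() streams the image through one reusable DMA buffer
 * in chunks of at most LANCER_FW_DOWNLOAD_CHUNK bytes, advancing by the
 * number of bytes the firmware reports as actually written, and finally
 * issues a zero-length write to the same object to commit the image.
 * A minimal sketch of that chunking pattern, where wr() is a purely
 * illustrative stand-in for lancer_cmd_write_object():
 *
 *	static int stream_image(const u8 *img, u32 len,
 *			int (*wr)(const u8 *buf, u32 len, u32 off, u32 *done))
 *	{
 *		u32 off = 0, done = 0;
 *		int err;
 *
 *		while (len) {
 *			u32 n = min_t(u32, len, LANCER_FW_DOWNLOAD_CHUNK);
 *
 *			err = wr(img + off, n, off, &done);
 *			if (err)
 *				return err;
 *			off += done;
 *			len -= done;
 *		}
 *		/* a zero-length write at the final offset commits the image */
 *		return wr(NULL, 0, off, &done);
 *	}
 */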
2869
2870 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2871 {
2872         struct flash_file_hdr_g2 *fhdr;
2873         struct flash_file_hdr_g3 *fhdr3;
2874         struct image_hdr *img_hdr_ptr = NULL;
2875         struct be_dma_mem flash_cmd;
2876         const u8 *p;
2877         int status = 0, i = 0, num_imgs = 0;
2878
2879         p = fw->data;
2880         fhdr = (struct flash_file_hdr_g2 *) p;
2881
2882         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
2883         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2884                                           &flash_cmd.dma, GFP_KERNEL);
2885         if (!flash_cmd.va) {
2886                 status = -ENOMEM;
2887                 dev_err(&adapter->pdev->dev,
2888                         "Memory allocation failure while flashing\n");
2889                 goto be_fw_exit;
2890         }
2891
2892         if ((adapter->generation == BE_GEN3) &&
2893                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2894                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2895                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2896                 for (i = 0; i < num_imgs; i++) {
2897                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2898                                         (sizeof(struct flash_file_hdr_g3) +
2899                                          i * sizeof(struct image_hdr)));
2900                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2901                                 status = be_flash_data(adapter, fw, &flash_cmd,
2902                                                         num_imgs);
2903                 }
2904         } else if ((adapter->generation == BE_GEN2) &&
2905                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2906                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2907         } else {
2908                 dev_err(&adapter->pdev->dev,
2909                         "UFI image is not compatible with this adapter generation\n");
2910                 status = -1;
2911         }
2912
2913         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2914                           flash_cmd.dma);
2915         if (status) {
2916                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2917                 goto be_fw_exit;
2918         }
2919
2920         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2921
2922 be_fw_exit:
2923         return status;
2924 }
2925
2926 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2927 {
2928         const struct firmware *fw;
2929         int status;
2930
2931         if (!netif_running(adapter->netdev)) {
2932                 dev_err(&adapter->pdev->dev,
2933                         "Firmware load not allowed (interface is down)\n");
2934                 return -1;
2935         }
2936
2937         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2938         if (status)
2939                 goto fw_exit;
2940
2941         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2942
2943         if (lancer_chip(adapter))
2944                 status = lancer_fw_download(adapter, fw);
2945         else
2946                 status = be_fw_download(adapter, fw);
2947
2948 fw_exit:
2949         release_firmware(fw);
2950         return status;
2951 }
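/*
 * be_load_fw() is reached through the driver's ethtool flash_device hook,
 * so a typical invocation from userspace is:
 *
 *	# ethtool -f eth0 <fw-file>
 *
 * The file is resolved by request_firmware(), so it must be visible to
 * the firmware loader (usually under /lib/firmware), and the interface
 * must be up, as checked above.
 */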
2952
2953 static const struct net_device_ops be_netdev_ops = {
2954         .ndo_open               = be_open,
2955         .ndo_stop               = be_close,
2956         .ndo_start_xmit         = be_xmit,
2957         .ndo_set_rx_mode        = be_set_rx_mode,
2958         .ndo_set_mac_address    = be_mac_addr_set,
2959         .ndo_change_mtu         = be_change_mtu,
2960         .ndo_get_stats64        = be_get_stats64,
2961         .ndo_validate_addr      = eth_validate_addr,
2962         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2963         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2964         .ndo_set_vf_mac         = be_set_vf_mac,
2965         .ndo_set_vf_vlan        = be_set_vf_vlan,
2966         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2967         .ndo_get_vf_config      = be_get_vf_config
2968 };
2969
2970 static void be_netdev_init(struct net_device *netdev)
2971 {
2972         struct be_adapter *adapter = netdev_priv(netdev);
2973         struct be_rx_obj *rxo;
2974         int i;
2975
2976         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2977                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2978                 NETIF_F_HW_VLAN_TX;
2979         if (be_multi_rxq(adapter))
2980                 netdev->hw_features |= NETIF_F_RXHASH;
2981
2982         netdev->features |= netdev->hw_features |
2983                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2984
2985         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2986                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2987
2988         netdev->flags |= IFF_MULTICAST;
2989
2990         netif_set_gso_max_size(netdev, 65535);
2991
2992         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2993
2994         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2995
2996         for_all_rx_queues(adapter, rxo, i)
2997                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2998                                 BE_NAPI_WEIGHT);
2999
3000         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3001                 BE_NAPI_WEIGHT);
3002 }
3003
3004 static void be_unmap_pci_bars(struct be_adapter *adapter)
3005 {
3006         if (adapter->csr)
3007                 iounmap(adapter->csr);
3008         if (adapter->db)
3009                 iounmap(adapter->db);
3010 }
3011
3012 static int be_map_pci_bars(struct be_adapter *adapter)
3013 {
3014         u8 __iomem *addr;
3015         int db_reg;
3016
3017         if (lancer_chip(adapter)) {
3018                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3019                         pci_resource_len(adapter->pdev, 0));
3020                 if (addr == NULL)
3021                         return -ENOMEM;
3022                 adapter->db = addr;
3023                 return 0;
3024         }
3025
3026         if (be_physfn(adapter)) {
3027                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3028                                 pci_resource_len(adapter->pdev, 2));
3029                 if (addr == NULL)
3030                         return -ENOMEM;
3031                 adapter->csr = addr;
3032         }
3033
3034         if (adapter->generation == BE_GEN2) {
3035                 db_reg = 4;
3036         } else {
3037                 if (be_physfn(adapter))
3038                         db_reg = 4;
3039                 else
3040                         db_reg = 0;
3041         }
3042         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3043                                 pci_resource_len(adapter->pdev, db_reg));
3044         if (addr == NULL)
3045                 goto pci_map_err;
3046         adapter->db = addr;
3047
3048         return 0;
3049 pci_map_err:
3050         be_unmap_pci_bars(adapter);
3051         return -ENOMEM;
3052 }
3053
3055 static void be_ctrl_cleanup(struct be_adapter *adapter)
3056 {
3057         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3058
3059         be_unmap_pci_bars(adapter);
3060
3061         if (mem->va)
3062                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3063                                   mem->dma);
3064
3065         mem = &adapter->rx_filter;
3066         if (mem->va)
3067                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3068                                   mem->dma);
3069 }
3070
3071 static int be_ctrl_init(struct be_adapter *adapter)
3072 {
3073         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3074         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3075         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3076         int status;
3077
3078         status = be_map_pci_bars(adapter);
3079         if (status)
3080                 goto done;
3081
3082         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3083         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3084                                                 mbox_mem_alloc->size,
3085                                                 &mbox_mem_alloc->dma,
3086                                                 GFP_KERNEL);
3087         if (!mbox_mem_alloc->va) {
3088                 status = -ENOMEM;
3089                 goto unmap_pci_bars;
3090         }
3091         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3092         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3093         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3094         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3095
3096         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3097         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3098                                         &rx_filter->dma, GFP_KERNEL);
3099         if (rx_filter->va == NULL) {
3100                 status = -ENOMEM;
3101                 goto free_mbox;
3102         }
3103         memset(rx_filter->va, 0, rx_filter->size);
3104
3105         mutex_init(&adapter->mbox_lock);
3106         spin_lock_init(&adapter->mcc_lock);
3107         spin_lock_init(&adapter->mcc_cq_lock);
3108
3109         init_completion(&adapter->flash_compl);
3110         pci_save_state(adapter->pdev);
3111         return 0;
3112
3113 free_mbox:
3114         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3115                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3116
3117 unmap_pci_bars:
3118         be_unmap_pci_bars(adapter);
3119
3120 done:
3121         return status;
3122 }
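/*
 * The MCC mailbox must sit on a 16-byte boundary, which is why
 * be_ctrl_init() allocates sizeof(struct be_mcc_mailbox) + 16 bytes and
 * rounds both the CPU and DMA addresses up with PTR_ALIGN(). The
 * rounding, illustrated:
 *
 *	void *raw = mbox_mem_alloc->va;    /* arbitrary alignment   */
 *	void *va  = PTR_ALIGN(raw, 16);    /* raw <= va < raw + 16  */
 *
 * Because the allocation is 16 bytes larger than the mailbox itself,
 * the aligned pointer always leaves room for the full structure.
 */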
3123
3124 static void be_stats_cleanup(struct be_adapter *adapter)
3125 {
3126         struct be_dma_mem *cmd = &adapter->stats_cmd;
3127
3128         if (cmd->va)
3129                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3130                                   cmd->va, cmd->dma);
3131 }
3132
3133 static int be_stats_init(struct be_adapter *adapter)
3134 {
3135         struct be_dma_mem *cmd = &adapter->stats_cmd;
3136
3137         if (adapter->generation == BE_GEN2) {
3138                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3139         } else {
3140                 if (lancer_chip(adapter))
3141                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3142                 else
3143                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3144         }
3145         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3146                                      GFP_KERNEL);
3147         if (!cmd->va)
3148                 return -ENOMEM;
3149         memset(cmd->va, 0, cmd->size);
3150         return 0;
3151 }
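/*
 * The stats command buffer is sized for the firmware interface actually
 * in use: the v0 request on BE2, the per-port ("pport") request on
 * Lancer, and the v1 request otherwise. It is zeroed explicitly because
 * dma_alloc_coherent() does not guarantee zeroed memory here.
 */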
3152
3153 static void __devexit be_remove(struct pci_dev *pdev)
3154 {
3155         struct be_adapter *adapter = pci_get_drvdata(pdev);
3156
3157         if (!adapter)
3158                 return;
3159
3160         cancel_delayed_work_sync(&adapter->work);
3161
3162         unregister_netdev(adapter->netdev);
3163
3164         be_clear(adapter);
3165
3166         be_stats_cleanup(adapter);
3167
3168         be_ctrl_cleanup(adapter);
3169
3170         be_sriov_disable(adapter);
3171
3172         be_msix_disable(adapter);
3173
3174         pci_set_drvdata(pdev, NULL);
3175         pci_release_regions(pdev);
3176         pci_disable_device(pdev);
3177
3178         free_netdev(adapter->netdev);
3179 }
3180
3181 static int be_get_config(struct be_adapter *adapter)
3182 {
3183         int status;
3184
3185         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3186                         &adapter->function_mode, &adapter->function_caps);
3187         if (status)
3188                 return status;
3189
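        /* Bit 0x400 in function_mode appears to select FLEX10
         * (multi-channel) operation, in which each function gets only a
         * quarter of the VLAN filter table; hence the divide by 4 below.
         */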
3190         if (adapter->function_mode & 0x400)
3191                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3192         else
3193                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3194
3195         status = be_cmd_get_cntl_attributes(adapter);
3196         if (status)
3197                 return status;
3198
3199         return 0;
3200 }
3201
3202 static int be_dev_family_check(struct be_adapter *adapter)
3203 {
3204         struct pci_dev *pdev = adapter->pdev;
3205         u32 sli_intf = 0, if_type;
3206
3207         switch (pdev->device) {
3208         case BE_DEVICE_ID1:
3209         case OC_DEVICE_ID1:
3210                 adapter->generation = BE_GEN2;
3211                 break;
3212         case BE_DEVICE_ID2:
3213         case OC_DEVICE_ID2:
3214                 adapter->generation = BE_GEN3;
3215                 break;
3216         case OC_DEVICE_ID3:
3217         case OC_DEVICE_ID4:
3218                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3219                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3220                                                 SLI_INTF_IF_TYPE_SHIFT;
3221
3222                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3223                         if_type != 0x02) {
3224                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3225                         return -EINVAL;
3226                 }
3227                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3228                                          SLI_INTF_FAMILY_SHIFT);
3229                 adapter->generation = BE_GEN3;
3230                 break;
3231         default:
3232                 adapter->generation = 0;
3233         }
3234         return 0;
3235 }
3236
3237 static int lancer_wait_ready(struct be_adapter *adapter)
3238 {
3239 #define SLIPORT_READY_TIMEOUT 500
3240         u32 sliport_status;
3241         int status = 0, i;
3242
3243         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3244                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3245                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3246                         break;
3247
3248                 msleep(20);
3249         }
3250
3251         if (i == SLIPORT_READY_TIMEOUT)
3252                 status = -1;
3253
3254         return status;
3255 }
3256
3257 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3258 {
3259         int status;
3260         u32 sliport_status, err, reset_needed;
3261         status = lancer_wait_ready(adapter);
3262         if (!status) {
3263                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3264                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3265                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3266                 if (err && reset_needed) {
3267                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3268                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3269
3270                         /* check if the adapter has corrected the error */
3271                         status = lancer_wait_ready(adapter);
3272                         sliport_status = ioread32(adapter->db +
3273                                                         SLIPORT_STATUS_OFFSET);
3274                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3275                                                 SLIPORT_STATUS_RN_MASK);
3276                         if (status || sliport_status)
3277                                 status = -1;
3278                 } else if (err || reset_needed) {
3279                         status = -1;
3280                 }
3281         }
3282         return status;
3283 }
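/*
 * If the SLIPORT status shows an error together with the "reset needed"
 * bit, the code above requests a port reset by writing
 * SLI_PORT_CONTROL_IP_MASK into SLIPORT_CONTROL and then re-polls the
 * ready bit; only a clean status afterwards counts as recovered.
 */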
3284
3285 static int __devinit be_probe(struct pci_dev *pdev,
3286                         const struct pci_device_id *pdev_id)
3287 {
3288         int status = 0;
3289         struct be_adapter *adapter;
3290         struct net_device *netdev;
3291
3292         status = pci_enable_device(pdev);
3293         if (status)
3294                 goto do_none;
3295
3296         status = pci_request_regions(pdev, DRV_NAME);
3297         if (status)
3298                 goto disable_dev;
3299         pci_set_master(pdev);
3300
3301         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3302         if (netdev == NULL) {
3303                 status = -ENOMEM;
3304                 goto rel_reg;
3305         }
3306         adapter = netdev_priv(netdev);
3307         adapter->pdev = pdev;
3308         pci_set_drvdata(pdev, adapter);
3309
3310         status = be_dev_family_check(adapter);
3311         if (status)
3312                 goto free_netdev;
3313
3314         adapter->netdev = netdev;
3315         SET_NETDEV_DEV(netdev, &pdev->dev);
3316
3317         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3318         if (!status) {
3319                 netdev->features |= NETIF_F_HIGHDMA;
3320         } else {
3321                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3322                 if (status) {
3323                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3324                         goto free_netdev;
3325                 }
3326         }
3327
3328         status = be_sriov_enable(adapter);
3329         if (status)
3330                 goto free_netdev;
3331
3332         status = be_ctrl_init(adapter);
3333         if (status)
3334                 goto disable_sriov;
3335
3336         if (lancer_chip(adapter)) {
3337                 status = lancer_test_and_set_rdy_state(adapter);
3338                 if (status) {
3339                         dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3340                         goto ctrl_clean;
3341                 }
3342         }
3343
3344         /* sync up with fw's ready state */
3345         if (be_physfn(adapter)) {
3346                 status = be_cmd_POST(adapter);
3347                 if (status)
3348                         goto ctrl_clean;
3349         }
3350
3351         /* tell fw we're ready to fire cmds */
3352         status = be_cmd_fw_init(adapter);
3353         if (status)
3354                 goto ctrl_clean;
3355
3356         status = be_cmd_reset_function(adapter);
3357         if (status)
3358                 goto ctrl_clean;
3359
3360         status = be_stats_init(adapter);
3361         if (status)
3362                 goto ctrl_clean;
3363
3364         status = be_get_config(adapter);
3365         if (status)
3366                 goto stats_clean;
3367
3368         /* The INTR bit may be set in the card when probed by a kdump kernel
3369          * after a crash.
3370          */
3371         if (!lancer_chip(adapter))
3372                 be_intr_set(adapter, false);
3373
3374         be_msix_enable(adapter);
3375
3376         INIT_DELAYED_WORK(&adapter->work, be_worker);
3377         adapter->rx_fc = adapter->tx_fc = true;
3378
3379         status = be_setup(adapter);
3380         if (status)
3381                 goto msix_disable;
3382
3383         be_netdev_init(netdev);
3384         status = register_netdev(netdev);
3385         if (status != 0)
3386                 goto unsetup;
3387
3388         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3389
3390         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3391         return 0;
3392
3393 unsetup:
3394         be_clear(adapter);
3395 msix_disable:
3396         be_msix_disable(adapter);
3397 stats_clean:
3398         be_stats_cleanup(adapter);
3399 ctrl_clean:
3400         be_ctrl_cleanup(adapter);
3401 disable_sriov:
3402         be_sriov_disable(adapter);
3403 free_netdev:
3404         free_netdev(netdev);
3405         pci_set_drvdata(pdev, NULL);
3406 rel_reg:
3407         pci_release_regions(pdev);
3408 disable_dev:
3409         pci_disable_device(pdev);
3410 do_none:
3411         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3412         return status;
3413 }
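/*
 * be_probe() unwinds with the standard kernel goto ladder: each failure
 * jumps to a label that releases only what was acquired so far, in
 * reverse order of acquisition. Reduced to its skeleton:
 *
 *	status = acquire_a();
 *	if (status)
 *		goto out;
 *	status = acquire_b();
 *	if (status)
 *		goto undo_a;
 *	return 0;
 *
 *	undo_a:
 *		release_a();
 *	out:
 *		return status;
 */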
3414
3415 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3416 {
3417         struct be_adapter *adapter = pci_get_drvdata(pdev);
3418         struct net_device *netdev =  adapter->netdev;
3419
3420         cancel_delayed_work_sync(&adapter->work);
3421         if (adapter->wol)
3422                 be_setup_wol(adapter, true);
3423
3424         netif_device_detach(netdev);
3425         if (netif_running(netdev)) {
3426                 rtnl_lock();
3427                 be_close(netdev);
3428                 rtnl_unlock();
3429         }
3430         be_clear(adapter);
3431
3432         be_msix_disable(adapter);
3433         pci_save_state(pdev);
3434         pci_disable_device(pdev);
3435         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3436         return 0;
3437 }
3438
3439 static int be_resume(struct pci_dev *pdev)
3440 {
3441         int status = 0;
3442         struct be_adapter *adapter = pci_get_drvdata(pdev);
3443         struct net_device *netdev =  adapter->netdev;
3444
3445         netif_device_detach(netdev);
3446
3447         status = pci_enable_device(pdev);
3448         if (status)
3449                 return status;
3450
3451         pci_set_power_state(pdev, PCI_D0);
3452         pci_restore_state(pdev);
3453
3454         be_msix_enable(adapter);
3455         /* tell fw we're ready to fire cmds */
3456         status = be_cmd_fw_init(adapter);
3457         if (status)
3458                 return status;
3459
3460         be_setup(adapter);
3461         if (netif_running(netdev)) {
3462                 rtnl_lock();
3463                 be_open(netdev);
3464                 rtnl_unlock();
3465         }
3466         netif_device_attach(netdev);
3467
3468         if (adapter->wol)
3469                 be_setup_wol(adapter, false);
3470
3471         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3472         return 0;
3473 }
3474
3475 /*
3476  * A function-level reset (FLR) will stop the controller from DMA-ing any data.
3477  */
3478 static void be_shutdown(struct pci_dev *pdev)
3479 {
3480         struct be_adapter *adapter = pci_get_drvdata(pdev);
3481
3482         if (!adapter)
3483                 return;
3484
3485         cancel_delayed_work_sync(&adapter->work);
3486
3487         netif_device_detach(adapter->netdev);
3488
3489         if (adapter->wol)
3490                 be_setup_wol(adapter, true);
3491
3492         be_cmd_reset_function(adapter);
3493
3494         pci_disable_device(pdev);
3495 }
3496
3497 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3498                                 pci_channel_state_t state)
3499 {
3500         struct be_adapter *adapter = pci_get_drvdata(pdev);
3501         struct net_device *netdev =  adapter->netdev;
3502
3503         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3504
3505         adapter->eeh_err = true;
3506
3507         netif_device_detach(netdev);
3508
3509         if (netif_running(netdev)) {
3510                 rtnl_lock();
3511                 be_close(netdev);
3512                 rtnl_unlock();
3513         }
3514         be_clear(adapter);
3515
3516         if (state == pci_channel_io_perm_failure)
3517                 return PCI_ERS_RESULT_DISCONNECT;
3518
3519         pci_disable_device(pdev);
3520
3521         return PCI_ERS_RESULT_NEED_RESET;
3522 }
3523
3524 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3525 {
3526         struct be_adapter *adapter = pci_get_drvdata(pdev);
3527         int status;
3528
3529         dev_info(&adapter->pdev->dev, "EEH reset\n");
3530         adapter->eeh_err = false;
3531
3532         status = pci_enable_device(pdev);
3533         if (status)
3534                 return PCI_ERS_RESULT_DISCONNECT;
3535
3536         pci_set_master(pdev);
3537         pci_set_power_state(pdev, PCI_D0);
3538         pci_restore_state(pdev);
3539
3540         /* Check if card is ok and fw is ready */
3541         status = be_cmd_POST(adapter);
3542         if (status)
3543                 return PCI_ERS_RESULT_DISCONNECT;
3544
3545         return PCI_ERS_RESULT_RECOVERED;
3546 }
3547
3548 static void be_eeh_resume(struct pci_dev *pdev)
3549 {
3550         int status = 0;
3551         struct be_adapter *adapter = pci_get_drvdata(pdev);
3552         struct net_device *netdev =  adapter->netdev;
3553
3554         dev_info(&adapter->pdev->dev, "EEH resume\n");
3555
3556         pci_save_state(pdev);
3557
3558         /* tell fw we're ready to fire cmds */
3559         status = be_cmd_fw_init(adapter);
3560         if (status)
3561                 goto err;
3562
3563         status = be_setup(adapter);
3564         if (status)
3565                 goto err;
3566
3567         if (netif_running(netdev)) {
3568                 status = be_open(netdev);
3569                 if (status)
3570                         goto err;
3571         }
3572         netif_device_attach(netdev);
3573         return;
3574 err:
3575         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3576 }
3577
3578 static struct pci_error_handlers be_eeh_handlers = {
3579         .error_detected = be_eeh_err_detected,
3580         .slot_reset = be_eeh_reset,
3581         .resume = be_eeh_resume,
3582 };
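/*
 * EEH (PCI error) recovery invokes these callbacks in order:
 * .error_detected when a channel failure is reported, .slot_reset after
 * the slot has been reset, and .resume once normal operation may
 * restart. Returning PCI_ERS_RESULT_DISCONNECT from either of the first
 * two aborts recovery and leaves the device detached.
 */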
3583
3584 static struct pci_driver be_driver = {
3585         .name = DRV_NAME,
3586         .id_table = be_dev_ids,
3587         .probe = be_probe,
3588         .remove = be_remove,
3589         .suspend = be_suspend,
3590         .resume = be_resume,
3591         .shutdown = be_shutdown,
3592         .err_handler = &be_eeh_handlers
3593 };
3594
3595 static int __init be_init_module(void)
3596 {
3597         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3598             rx_frag_size != 2048) {
3599                 printk(KERN_WARNING DRV_NAME
3600                         " : Module param rx_frag_size must be 2048/4096/8192."
3601                         " Using 2048\n");
3602                 rx_frag_size = 2048;
3603         }
3604
3605         return pci_register_driver(&be_driver);
3606 }
3607 module_init(be_init_module);
3608
3609 static void __exit be_exit_module(void)
3610 {
3611         pci_unregister_driver(&be_driver);
3612 }
3613 module_exit(be_exit_module);