be2net: Remove an incorrect pvid check in Tx
drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

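/* Enable/disable host interrupts for this function by toggling the HOSTINTR
 * bit of the membar interrupt-control register, accessed via PCI config space.
 */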
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

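/* Ring the RQ doorbell: tell HW how many RX buffers were just posted */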
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

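/* Ring the TX (ULP) doorbell: tell HW how many WRBs were just posted */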
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

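/* Ring the EQ doorbell: ack num_popped events; optionally re-arm the EQ
 * and clear the interrupt.
 */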
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

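/* Ring the CQ doorbell: ack num_popped completions; optionally re-arm the CQ */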
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For BE VF, MAC address is already activated by PF.
         * Hence only operation left is updating netdev->dev_addr.
         * Update it if user is passing the same MAC which was used
         * during configuring VF MAC from PF(Hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

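/* Fold a 16-bit HW counter (which wraps at 65535) into a 32-bit driver
 * counter, crediting one extra wrap when the new sample is below the
 * previously accumulated low word.
 */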
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

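/* Fill a WRB with the DMA address and length of one tx fragment */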
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

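/* Return the skb's vlan tag; if its priority is not enabled for this
 * function, substitute the FW-recommended priority.
 */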
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

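/* Build the TX header WRB: LSO/checksum-offload flags, vlan tag, and the
 * event/completion bits, total WRB count and payload length.
 */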
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

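/* DMA-map the skb's header and frags and post one WRB per fragment (plus
 * the header WRB and an optional dummy WRB). Returns the number of bytes
 * queued, or 0 after unwinding all mappings on a DMA mapping error.
 */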
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

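/* Insert the vlan tag into the packet data itself (rather than having HW
 * insert it) and clear the skb's hw-accel vlan tag.
 */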
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                skb = __vlan_put_tag(skb, vlan_tag);
                if (skb)
                        skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         */
        if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

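/* Sync the HW RX filter (promisc, allmulti, uc-list and mc-list) with the
 * current netdev state.
 */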
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

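/* Walk the PCI bus counting this PF's VFs; depending on vf_state, return
 * either the total number of VFs found or only those assigned to a guest.
 */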
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

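/* Adaptive interrupt coalescing: derive a new EQ delay from the RX
 * packets-per-second rate observed since the last update (once a second).
 */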
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

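/* Return the page-info for the given RX frag index; unmap the page once
 * its last user has consumed it.
 */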
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

1430 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1431                                  struct be_rx_compl_info *rxcp)
1432 {
1433         rxcp->pkt_size =
1434                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1435         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1436         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1437         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1438         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1439         rxcp->ip_csum =
1440                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1441         rxcp->l4_csum =
1442                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1443         rxcp->ipv6 =
1444                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1445         rxcp->rxq_idx =
1446                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1447         rxcp->num_rcvd =
1448                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1449         rxcp->pkt_type =
1450                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1451         rxcp->rss_hash =
1452                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1453         if (rxcp->vlanf) {
1454                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1455                                           compl);
1456                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1457                                                compl);
1458         }
1459         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1460 }
1461
1462 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1463                                  struct be_rx_compl_info *rxcp)
1464 {
1465         rxcp->pkt_size =
1466                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1467         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1468         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1469         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1470         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1471         rxcp->ip_csum =
1472                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1473         rxcp->l4_csum =
1474                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1475         rxcp->ipv6 =
1476                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1477         rxcp->rxq_idx =
1478                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1479         rxcp->num_rcvd =
1480                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1481         rxcp->pkt_type =
1482                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1483         rxcp->rss_hash =
1484                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1485         if (rxcp->vlanf) {
1486                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1487                                           compl);
1488                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1489                                                compl);
1490         }
1491         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1492 }
1493
1494 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1495 {
1496         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1497         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1498         struct be_adapter *adapter = rxo->adapter;
1499
1500         /* For checking the valid bit it is OK to use either definition, as
1501          * the valid bit is at the same position in both v0 and v1 Rx compls */
1502         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1503                 return NULL;
1504
1505         rmb();
1506         be_dws_le_to_cpu(compl, sizeof(*compl));
1507
1508         if (adapter->be3_native)
1509                 be_parse_rx_compl_v1(compl, rxcp);
1510         else
1511                 be_parse_rx_compl_v0(compl, rxcp);
1512
1513         if (rxcp->vlanf) {
1514                 /* vlanf could be wrongly set in some cards.
1515                  * Ignore it if vtm is not set */
1516                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1517                         rxcp->vlanf = 0;
1518
1519                 if (!lancer_chip(adapter))
1520                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1521
1522                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1523                     !adapter->vlan_tag[rxcp->vlan_tag])
1524                         rxcp->vlanf = 0;
1525         }
1526
1527         /* As the compl has been parsed, reset it; we won't touch it again */
1528         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1529
1530         queue_tail_inc(&rxo->cq);
1531         return rxcp;
1532 }
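/* Editor's note: a sketch (pseudocode, not extra driver logic) of the
 * valid-bit handshake used by be_rx_compl_get() above; it is the usual
 * DMA-ring producer/consumer pattern:
 *
 *      if (!compl->valid)      // HW hasn't written this entry yet
 *              return NULL;
 *      rmb();                  // order the valid-bit load before payload loads
 *      parse(compl);
 *      compl->valid = 0;       // so a wrapped-around pass can't mistake
 *                              // this stale entry for a new completion
 */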
1533
1534 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1535 {
1536         u32 order = get_order(size);
1537
1538         if (order > 0)
1539                 gfp |= __GFP_COMP;
1540         return  alloc_pages(gfp, order);
1541 }
1542
1543 /*
1544  * Allocate a page, split it into fragments of size rx_frag_size and post
1545  * them as receive buffers to BE
1546  */
1547 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1548 {
1549         struct be_adapter *adapter = rxo->adapter;
1550         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1551         struct be_queue_info *rxq = &rxo->q;
1552         struct page *pagep = NULL;
1553         struct be_eth_rx_d *rxd;
1554         u64 page_dmaaddr = 0, frag_dmaaddr;
1555         u32 posted, page_offset = 0;
1556
1557         page_info = &rxo->page_info_tbl[rxq->head];
1558         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1559                 if (!pagep) {
1560                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1561                         if (unlikely(!pagep)) {
1562                                 rx_stats(rxo)->rx_post_fail++;
1563                                 break;
1564                         }
1565                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1566                                                     0, adapter->big_page_size,
1567                                                     DMA_FROM_DEVICE);
1568                         page_info->page_offset = 0;
1569                 } else {
1570                         get_page(pagep);
1571                         page_info->page_offset = page_offset + rx_frag_size;
1572                 }
1573                 page_offset = page_info->page_offset;
1574                 page_info->page = pagep;
1575                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1576                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1577
1578                 rxd = queue_head_node(rxq);
1579                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1580                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1581
1582                 /* Any space left in the current big page for another frag? */
1583                 if ((page_offset + rx_frag_size + rx_frag_size) >
1584                                         adapter->big_page_size) {
1585                         pagep = NULL;
1586                         page_info->last_page_user = true;
1587                 }
1588
1589                 prev_page_info = page_info;
1590                 queue_head_inc(rxq);
1591                 page_info = &rxo->page_info_tbl[rxq->head];
1592         }
1593         if (pagep)
1594                 prev_page_info->last_page_user = true;
1595
1596         if (posted) {
1597                 atomic_add(posted, &rxq->used);
1598                 be_rxq_notify(adapter, rxq->id, posted);
1599         } else if (atomic_read(&rxq->used) == 0) {
1600                 /* Let be_worker replenish when memory is available */
1601                 rxo->rx_post_starved = true;
1602         }
1603 }
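/* Editor's note: a worked example of the splitting scheme in
 * be_post_rx_frags() above, assuming the default rx_frag_size of 2048
 * and a 4K big_page_size:
 *
 *      frag 0: fresh page, page_offset = 0;
 *              (0 + 2048 + 2048) > 4096 is false, so the page is kept
 *      frag 1: get_page(), page_offset = 0 + 2048 = 2048;
 *              (2048 + 2048 + 2048) > 4096, so last_page_user = true and
 *              the next iteration allocates a fresh page
 *
 * Each frag is posted at frag_dmaaddr = page_dmaaddr + page_offset, and
 * the page refcount ends up equal to the number of frags carved from it,
 * so releasing the last consumed frag frees the page.
 */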
1604
1605 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1606 {
1607         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1608
1609         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1610                 return NULL;
1611
1612         rmb();
1613         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1614
1615         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1616
1617         queue_tail_inc(tx_cq);
1618         return txcp;
1619 }
1620
1621 static u16 be_tx_compl_process(struct be_adapter *adapter,
1622                 struct be_tx_obj *txo, u16 last_index)
1623 {
1624         struct be_queue_info *txq = &txo->q;
1625         struct be_eth_wrb *wrb;
1626         struct sk_buff **sent_skbs = txo->sent_skb_list;
1627         struct sk_buff *sent_skb;
1628         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1629         bool unmap_skb_hdr = true;
1630
1631         sent_skb = sent_skbs[txq->tail];
1632         BUG_ON(!sent_skb);
1633         sent_skbs[txq->tail] = NULL;
1634
1635         /* skip header wrb */
1636         queue_tail_inc(txq);
1637
1638         do {
1639                 cur_index = txq->tail;
1640                 wrb = queue_tail_node(txq);
1641                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1642                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1643                 unmap_skb_hdr = false;
1644
1645                 num_wrbs++;
1646                 queue_tail_inc(txq);
1647         } while (cur_index != last_index);
1648
1649         kfree_skb(sent_skb);
1650         return num_wrbs;
1651 }
1652
1653 /* Drain the event queue and return the number of events consumed */
1654 static inline int events_get(struct be_eq_obj *eqo)
1655 {
1656         struct be_eq_entry *eqe;
1657         int num = 0;
1658
1659         do {
1660                 eqe = queue_tail_node(&eqo->q);
1661                 if (eqe->evt == 0)
1662                         break;
1663
1664                 rmb();
1665                 eqe->evt = 0;
1666                 num++;
1667                 queue_tail_inc(&eqo->q);
1668         } while (true);
1669
1670         return num;
1671 }
1672
1673 /* Leaves the EQ in a disarmed state */
1674 static void be_eq_clean(struct be_eq_obj *eqo)
1675 {
1676         int num = events_get(eqo);
1677
1678         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1679 }
1680
1681 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1682 {
1683         struct be_rx_page_info *page_info;
1684         struct be_queue_info *rxq = &rxo->q;
1685         struct be_queue_info *rx_cq = &rxo->cq;
1686         struct be_rx_compl_info *rxcp;
1687         struct be_adapter *adapter = rxo->adapter;
1688         int flush_wait = 0;
1689         u16 tail;
1690
1691         /* Consume pending rx completions.
1692          * Wait for the flush completion (identified by zero num_rcvd)
1693          * to arrive. Notify the CQ even when there are no more CQ entries,
1694          * so that HW flushes partially coalesced CQ entries.
1695          * On Lancer, there is no need to wait for the flush compl.
1696          */
1697         for (;;) {
1698                 rxcp = be_rx_compl_get(rxo);
1699                 if (rxcp == NULL) {
1700                         if (lancer_chip(adapter))
1701                                 break;
1702
1703                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1704                                 dev_warn(&adapter->pdev->dev,
1705                                          "did not receive flush compl\n");
1706                                 break;
1707                         }
1708                         be_cq_notify(adapter, rx_cq->id, true, 0);
1709                         mdelay(1);
1710                 } else {
1711                         be_rx_compl_discard(rxo, rxcp);
1712                         be_cq_notify(adapter, rx_cq->id, true, 1);
1713                         if (rxcp->num_rcvd == 0)
1714                                 break;
1715                 }
1716         }
1717
1718         /* After cleanup, leave the CQ in unarmed state */
1719         be_cq_notify(adapter, rx_cq->id, false, 0);
1720
1721         /* Then free posted rx buffers that were not used */
1722         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1723         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1724                 page_info = get_rx_page_info(rxo, tail);
1725                 put_page(page_info->page);
1726                 memset(page_info, 0, sizeof(*page_info));
1727         }
1728         BUG_ON(atomic_read(&rxq->used));
1729         rxq->tail = rxq->head = 0;
1730 }
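/* Editor's note: a quick arithmetic check of the tail computation in
 * be_rx_cq_clean() above: with rxq->len = 1024, rxq->head = 10 and 3
 * buffers still posted (rxq->used == 3),
 *
 *      tail = (10 + 1024 - 3) % 1024 = 7
 *
 * i.e. the oldest unconsumed buffer sits 3 slots behind the head, and the
 * loop walks slots 7, 8 and 9, dropping one page reference per slot.
 */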
1731
1732 static void be_tx_compl_clean(struct be_adapter *adapter)
1733 {
1734         struct be_tx_obj *txo;
1735         struct be_queue_info *txq;
1736         struct be_eth_tx_compl *txcp;
1737         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1738         struct sk_buff *sent_skb;
1739         bool dummy_wrb;
1740         int i, pending_txqs;
1741
1742         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1743         do {
1744                 pending_txqs = adapter->num_tx_qs;
1745
1746                 for_all_tx_queues(adapter, txo, i) {
1747                         txq = &txo->q;
1748                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1749                                 end_idx =
1750                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1751                                                       wrb_index, txcp);
1752                                 num_wrbs += be_tx_compl_process(adapter, txo,
1753                                                                 end_idx);
1754                                 cmpl++;
1755                         }
1756                         if (cmpl) {
1757                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1758                                 atomic_sub(num_wrbs, &txq->used);
1759                                 cmpl = 0;
1760                                 num_wrbs = 0;
1761                         }
1762                         if (atomic_read(&txq->used) == 0)
1763                                 pending_txqs--;
1764                 }
1765
1766                 if (pending_txqs == 0 || ++timeo > 200)
1767                         break;
1768
1769                 mdelay(1);
1770         } while (true);
1771
1772         for_all_tx_queues(adapter, txo, i) {
1773                 txq = &txo->q;
1774                 if (atomic_read(&txq->used))
1775                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1776                                 atomic_read(&txq->used));
1777
1778                 /* free posted tx for which compls will never arrive */
1779                 while (atomic_read(&txq->used)) {
1780                         sent_skb = txo->sent_skb_list[txq->tail];
1781                         end_idx = txq->tail;
1782                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1783                                                    &dummy_wrb);
1784                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1785                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1786                         atomic_sub(num_wrbs, &txq->used);
1787                 }
1788         }
1789 }
1790
1791 static void be_evt_queues_destroy(struct be_adapter *adapter)
1792 {
1793         struct be_eq_obj *eqo;
1794         int i;
1795
1796         for_all_evt_queues(adapter, eqo, i) {
1797                 if (eqo->q.created) {
1798                         be_eq_clean(eqo);
1799                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1800                 }
1801                 be_queue_free(adapter, &eqo->q);
1802         }
1803 }
1804
1805 static int be_evt_queues_create(struct be_adapter *adapter)
1806 {
1807         struct be_queue_info *eq;
1808         struct be_eq_obj *eqo;
1809         int i, rc;
1810
1811         adapter->num_evt_qs = num_irqs(adapter);
1812
1813         for_all_evt_queues(adapter, eqo, i) {
1814                 eqo->adapter = adapter;
1815                 eqo->tx_budget = BE_TX_BUDGET;
1816                 eqo->idx = i;
1817                 eqo->max_eqd = BE_MAX_EQD;
1818                 eqo->enable_aic = true;
1819
1820                 eq = &eqo->q;
1821                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1822                                         sizeof(struct be_eq_entry));
1823                 if (rc)
1824                         return rc;
1825
1826                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1827                 if (rc)
1828                         return rc;
1829         }
1830         return 0;
1831 }
1832
1833 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1834 {
1835         struct be_queue_info *q;
1836
1837         q = &adapter->mcc_obj.q;
1838         if (q->created)
1839                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1840         be_queue_free(adapter, q);
1841
1842         q = &adapter->mcc_obj.cq;
1843         if (q->created)
1844                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1845         be_queue_free(adapter, q);
1846 }
1847
1848 /* Must be called only after TX qs are created as MCC shares TX EQ */
1849 static int be_mcc_queues_create(struct be_adapter *adapter)
1850 {
1851         struct be_queue_info *q, *cq;
1852
1853         cq = &adapter->mcc_obj.cq;
1854         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1855                         sizeof(struct be_mcc_compl)))
1856                 goto err;
1857
1858         /* Use the default EQ for MCC completions */
1859         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1860                 goto mcc_cq_free;
1861
1862         q = &adapter->mcc_obj.q;
1863         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1864                 goto mcc_cq_destroy;
1865
1866         if (be_cmd_mccq_create(adapter, q, cq))
1867                 goto mcc_q_free;
1868
1869         return 0;
1870
1871 mcc_q_free:
1872         be_queue_free(adapter, q);
1873 mcc_cq_destroy:
1874         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1875 mcc_cq_free:
1876         be_queue_free(adapter, cq);
1877 err:
1878         return -1;
1879 }
1880
1881 static void be_tx_queues_destroy(struct be_adapter *adapter)
1882 {
1883         struct be_queue_info *q;
1884         struct be_tx_obj *txo;
1885         u8 i;
1886
1887         for_all_tx_queues(adapter, txo, i) {
1888                 q = &txo->q;
1889                 if (q->created)
1890                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1891                 be_queue_free(adapter, q);
1892
1893                 q = &txo->cq;
1894                 if (q->created)
1895                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1896                 be_queue_free(adapter, q);
1897         }
1898 }
1899
1900 static int be_num_txqs_want(struct be_adapter *adapter)
1901 {
1902         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1903             be_is_mc(adapter) ||
1904             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1905             BE2_chip(adapter))
1906                 return 1;
1907         else
1908                 return adapter->max_tx_queues;
1909 }
1910
1911 static int be_tx_cqs_create(struct be_adapter *adapter)
1912 {
1913         struct be_queue_info *cq, *eq;
1914         int status;
1915         struct be_tx_obj *txo;
1916         u8 i;
1917
1918         adapter->num_tx_qs = be_num_txqs_want(adapter);
1919         if (adapter->num_tx_qs != MAX_TX_QS) {
1920                 rtnl_lock();
1921                 netif_set_real_num_tx_queues(adapter->netdev,
1922                         adapter->num_tx_qs);
1923                 rtnl_unlock();
1924         }
1925
1926         for_all_tx_queues(adapter, txo, i) {
1927                 cq = &txo->cq;
1928                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1929                                         sizeof(struct be_eth_tx_compl));
1930                 if (status)
1931                         return status;
1932
1933                 /* If num_evt_qs is less than num_tx_qs, then more than
1934                  * one txq shares an eq
1935                  */
1936                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1937                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1938                 if (status)
1939                         return status;
1940         }
1941         return 0;
1942 }
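/* Editor's note: an illustrative example of the cq-to-eq assignment
 * above. With, say, num_tx_qs = 8 and num_evt_qs = 4, the i % num_evt_qs
 * mapping gives eq0 <- {txq0, txq4}, eq1 <- {txq1, txq5}, and so on.
 * be_poll() later walks the same stride (i += num_evt_qs) so that each EQ
 * services exactly the TXQs mapped to it here.
 */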
1943
1944 static int be_tx_qs_create(struct be_adapter *adapter)
1945 {
1946         struct be_tx_obj *txo;
1947         int i, status;
1948
1949         for_all_tx_queues(adapter, txo, i) {
1950                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1951                                         sizeof(struct be_eth_wrb));
1952                 if (status)
1953                         return status;
1954
1955                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1956                 if (status)
1957                         return status;
1958         }
1959
1960         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1961                  adapter->num_tx_qs);
1962         return 0;
1963 }
1964
1965 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1966 {
1967         struct be_queue_info *q;
1968         struct be_rx_obj *rxo;
1969         int i;
1970
1971         for_all_rx_queues(adapter, rxo, i) {
1972                 q = &rxo->cq;
1973                 if (q->created)
1974                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1975                 be_queue_free(adapter, q);
1976         }
1977 }
1978
1979 static int be_rx_cqs_create(struct be_adapter *adapter)
1980 {
1981         struct be_queue_info *eq, *cq;
1982         struct be_rx_obj *rxo;
1983         int rc, i;
1984
1985         /* We'll create as many RSS rings as there are irqs.
1986          * But when there's only one irq there's no use creating RSS rings.
1987          */
1988         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1989                                 num_irqs(adapter) + 1 : 1;
1990         if (adapter->num_rx_qs != MAX_RX_QS) {
1991                 rtnl_lock();
1992                 netif_set_real_num_rx_queues(adapter->netdev,
1993                                              adapter->num_rx_qs);
1994                 rtnl_unlock();
1995         }
1996
1997         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1998         for_all_rx_queues(adapter, rxo, i) {
1999                 rxo->adapter = adapter;
2000                 cq = &rxo->cq;
2001                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2002                                 sizeof(struct be_eth_rx_compl));
2003                 if (rc)
2004                         return rc;
2005
2006                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2007                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2008                 if (rc)
2009                         return rc;
2010         }
2011
2012         dev_info(&adapter->pdev->dev,
2013                  "created %d RSS queue(s) and 1 default RX queue\n",
2014                  adapter->num_rx_qs - 1);
2015         return 0;
2016 }
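/* Editor's note: an example of the ring count chosen above. With 4 irqs,
 * num_rx_qs = 4 + 1 = 5 (four RSS rings plus the default non-RSS ring);
 * with a single irq only the default ring is created (num_rx_qs = 1),
 * since RSS spreading buys nothing there.
 */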
2017
2018 static irqreturn_t be_intx(int irq, void *dev)
2019 {
2020         struct be_eq_obj *eqo = dev;
2021         struct be_adapter *adapter = eqo->adapter;
2022         int num_evts = 0;
2023
2024         /* IRQ is not expected when NAPI is scheduled as the EQ
2025          * will not be armed.
2026          * But this can happen on Lancer INTx, where it takes
2027          * a while to de-assert INTx, or on BE2, where occasionally
2028          * an interrupt may be raised even when the EQ is unarmed.
2029          * If NAPI is already scheduled, then counting & notifying
2030          * events will orphan them.
2031          */
2032         if (napi_schedule_prep(&eqo->napi)) {
2033                 num_evts = events_get(eqo);
2034                 __napi_schedule(&eqo->napi);
2035                 if (num_evts)
2036                         eqo->spurious_intr = 0;
2037         }
2038         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2039
2040         /* Return IRQ_HANDLED only for the first spurious intr
2041          * after a valid intr to stop the kernel from branding
2042          * this irq as a bad one!
2043          */
2044         if (num_evts || eqo->spurious_intr++ == 0)
2045                 return IRQ_HANDLED;
2046         else
2047                 return IRQ_NONE;
2048 }
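/* Editor's note: the return policy of be_intx() above, as a small
 * decision table (a restatement of the code, not extra logic):
 *
 *      events counted          -> IRQ_HANDLED, spurious count reset
 *      no events, first time   -> IRQ_HANDLED (tolerated once)
 *      no events, repeated     -> IRQ_NONE, letting the kernel flag a
 *                                 genuinely misbehaving interrupt line
 */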
2049
2050 static irqreturn_t be_msix(int irq, void *dev)
2051 {
2052         struct be_eq_obj *eqo = dev;
2053
2054         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2055         napi_schedule(&eqo->napi);
2056         return IRQ_HANDLED;
2057 }
2058
2059 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2060 {
2061         return rxcp->tcpf && !rxcp->err;
2062 }
2063
2064 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2065                         int budget)
2066 {
2067         struct be_adapter *adapter = rxo->adapter;
2068         struct be_queue_info *rx_cq = &rxo->cq;
2069         struct be_rx_compl_info *rxcp;
2070         u32 work_done;
2071
2072         for (work_done = 0; work_done < budget; work_done++) {
2073                 rxcp = be_rx_compl_get(rxo);
2074                 if (!rxcp)
2075                         break;
2076
2077                 /* Is it a flush compl that has no data? */
2078                 if (unlikely(rxcp->num_rcvd == 0))
2079                         goto loop_continue;
2080
2081                 /* Discard compl with partial DMA Lancer B0 */
2082                 if (unlikely(!rxcp->pkt_size)) {
2083                         be_rx_compl_discard(rxo, rxcp);
2084                         goto loop_continue;
2085                 }
2086
2087                 /* On BE, drop pkts that arrive due to imperfect filtering in
2088                  * promiscuous mode on some SKUs
2089                  */
2090                 if (unlikely(rxcp->port != adapter->port_num &&
2091                                 !lancer_chip(adapter))) {
2092                         be_rx_compl_discard(rxo, rxcp);
2093                         goto loop_continue;
2094                 }
2095
2096                 if (do_gro(rxcp))
2097                         be_rx_compl_process_gro(rxo, napi, rxcp);
2098                 else
2099                         be_rx_compl_process(rxo, rxcp);
2100 loop_continue:
2101                 be_rx_stats_update(rxo, rxcp);
2102         }
2103
2104         if (work_done) {
2105                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2106
2107                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2108                         be_post_rx_frags(rxo, GFP_ATOMIC);
2109         }
2110
2111         return work_done;
2112 }
2113
2114 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2115                           int budget, int idx)
2116 {
2117         struct be_eth_tx_compl *txcp;
2118         int num_wrbs = 0, work_done;
2119
2120         for (work_done = 0; work_done < budget; work_done++) {
2121                 txcp = be_tx_compl_get(&txo->cq);
2122                 if (!txcp)
2123                         break;
2124                 num_wrbs += be_tx_compl_process(adapter, txo,
2125                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2126                                         wrb_index, txcp));
2127         }
2128
2129         if (work_done) {
2130                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2131                 atomic_sub(num_wrbs, &txo->q.used);
2132
2133                 /* As Tx wrbs have been freed up, wake up the netdev queue
2134                  * if it was stopped due to lack of tx wrbs. */
2135                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2136                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2137                         netif_wake_subqueue(adapter->netdev, idx);
2138                 }
2139
2140                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2141                 tx_stats(txo)->tx_compl += work_done;
2142                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2143         }
2144         return (work_done < budget); /* Done */
2145 }
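/* Editor's note: the wake-up above is deliberately hysteretic. As an
 * illustration (assuming a 2048-entry txq; see TX_Q_LEN for the real
 * size), a subqueue stopped by the xmit path is restarted only once
 * fewer than 1024 wrbs remain in use, avoiding rapid stop/wake flapping
 * around a full queue.
 */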
2146
2147 int be_poll(struct napi_struct *napi, int budget)
2148 {
2149         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2150         struct be_adapter *adapter = eqo->adapter;
2151         int max_work = 0, work, i, num_evts;
2152         bool tx_done;
2153
2154         num_evts = events_get(eqo);
2155
2156         /* Process all TXQs serviced by this EQ */
2157         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2158                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2159                                         eqo->tx_budget, i);
2160                 if (!tx_done)
2161                         max_work = budget;
2162         }
2163
2164         /* This loop iterates twice for EQ0, for which completions of the
2165          * last RXQ (the default one) are also processed.
2166          * For the other EQs the loop iterates only once.
2167          */
2168         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2169                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2170                 max_work = max(work, max_work);
2171         }
2172
2173         if (is_mcc_eqo(eqo))
2174                 be_process_mcc(adapter);
2175
2176         if (max_work < budget) {
2177                 napi_complete(napi);
2178                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2179         } else {
2180                 /* As we'll continue in polling mode, count and clear events */
2181                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2182         }
2183         return max_work;
2184 }
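/* Editor's note: a worked example of the RX striding in be_poll() above,
 * assuming num_rx_qs = 5 (four RSS rings plus the default ring last) and
 * num_evt_qs = 4: eq0 (idx 0) services rxq0 and rxq4, the second pass
 * being the default ring (hence the "iterates twice for EQ0" note),
 * while eq1..eq3 each service a single RSS ring.
 */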
2185
2186 void be_detect_error(struct be_adapter *adapter)
2187 {
2188         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2189         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2190         u32 i;
2191
2192         if (be_hw_error(adapter))
2193                 return;
2194
2195         if (lancer_chip(adapter)) {
2196                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2197                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2198                         sliport_err1 = ioread32(adapter->db +
2199                                         SLIPORT_ERROR1_OFFSET);
2200                         sliport_err2 = ioread32(adapter->db +
2201                                         SLIPORT_ERROR2_OFFSET);
2202                 }
2203         } else {
2204                 pci_read_config_dword(adapter->pdev,
2205                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2206                 pci_read_config_dword(adapter->pdev,
2207                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2208                 pci_read_config_dword(adapter->pdev,
2209                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2210                 pci_read_config_dword(adapter->pdev,
2211                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2212
2213                 ue_lo = (ue_lo & ~ue_lo_mask);
2214                 ue_hi = (ue_hi & ~ue_hi_mask);
2215         }
2216
2217         /* On certain platforms BE hardware can indicate spurious UEs.
2218          * A real UE will make the h/w stop working completely anyway,
2219          * so hw_error is deliberately not set on UE detection alone.
2220          */
2221         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2222                 adapter->hw_error = true;
2223                 dev_err(&adapter->pdev->dev,
2224                         "Error detected in the card\n");
2225                 dev_err(&adapter->pdev->dev,
2226                         "ERR: sliport status 0x%x\n", sliport_status);
2227                 dev_err(&adapter->pdev->dev,
2228                         "ERR: sliport error1 0x%x\n", sliport_err1);
2229                 dev_err(&adapter->pdev->dev,
2230                         "ERR: sliport error2 0x%x\n", sliport_err2);
2231         }
2235
2236         if (ue_lo) {
2237                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2238                         if (ue_lo & 1)
2239                                 dev_err(&adapter->pdev->dev,
2240                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2241                 }
2242         }
2243
2244         if (ue_hi) {
2245                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2246                         if (ue_hi & 1)
2247                                 dev_err(&adapter->pdev->dev,
2248                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2249                 }
2250         }
2252 }
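/* Editor's note: an example of the UE masking above. If ue_lo reads 0x5
 * and ue_lo_mask reads 0x4, then ue_lo & ~ue_lo_mask = 0x1, so only
 * bit 0 ("CEV" in ue_status_low_desc) is reported; bits set in the mask
 * are expected on that platform and are ignored.
 */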
2253
2254 static void be_msix_disable(struct be_adapter *adapter)
2255 {
2256         if (msix_enabled(adapter)) {
2257                 pci_disable_msix(adapter->pdev);
2258                 adapter->num_msix_vec = 0;
2259         }
2260 }
2261
2262 static uint be_num_rss_want(struct be_adapter *adapter)
2263 {
2264         u32 num = 0;
2265
2266         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2267             (lancer_chip(adapter) ||
2268              (!sriov_want(adapter) && be_physfn(adapter)))) {
2269                 num = adapter->max_rss_queues;
2270                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2271         }
2272         return num;
2273 }
2274
2275 static void be_msix_enable(struct be_adapter *adapter)
2276 {
2277 #define BE_MIN_MSIX_VECTORS             1
2278         int i, status, num_vec, num_roce_vec = 0;
2279         struct device *dev = &adapter->pdev->dev;
2280
2281         /* If RSS queues are not used, need a vec for default RX Q */
2282         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2283         if (be_roce_supported(adapter)) {
2284                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2285                                         (num_online_cpus() + 1));
2286                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2287                 num_vec += num_roce_vec;
2288                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2289         }
2290         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2291
2292         for (i = 0; i < num_vec; i++)
2293                 adapter->msix_entries[i].entry = i;
2294
2295         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2296         if (status == 0) {
2297                 goto done;
2298         } else if (status >= BE_MIN_MSIX_VECTORS) {
2299                 num_vec = status;
2300                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2301                                 num_vec) == 0)
2302                         goto done;
2303         }
2304
2305         dev_warn(dev, "MSI-x enable failed\n");
2306         return;
2307 done:
2308         if (be_roce_supported(adapter)) {
2309                 if (num_vec > num_roce_vec) {
2310                         adapter->num_msix_vec = num_vec - num_roce_vec;
2311                         adapter->num_msix_roce_vec =
2312                                 num_vec - adapter->num_msix_vec;
2313                 } else {
2314                         adapter->num_msix_vec = num_vec;
2315                         adapter->num_msix_roce_vec = 0;
2316                 }
2317         } else
2318                 adapter->num_msix_vec = num_vec;
2319         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2320         return;
2321 }
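/* Editor's note on the retry in be_msix_enable() above: in this kernel
 * generation pci_enable_msix() returns 0 on success, a negative errno on
 * hard failure, or a positive count of vectors that could actually be
 * allocated; in the last case the driver retries once with that smaller
 * count before giving up on MSI-x.
 */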
2322
2323 static inline int be_msix_vec_get(struct be_adapter *adapter,
2324                                 struct be_eq_obj *eqo)
2325 {
2326         return adapter->msix_entries[eqo->idx].vector;
2327 }
2328
2329 static int be_msix_register(struct be_adapter *adapter)
2330 {
2331         struct net_device *netdev = adapter->netdev;
2332         struct be_eq_obj *eqo;
2333         int status, i, vec;
2334
2335         for_all_evt_queues(adapter, eqo, i) {
2336                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2337                 vec = be_msix_vec_get(adapter, eqo);
2338                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2339                 if (status)
2340                         goto err_msix;
2341         }
2342
2343         return 0;
2344 err_msix:
2345         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2346                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2347         dev_warn(&adapter->pdev->dev, "MSI-x request IRQ failed - err %d\n",
2348                 status);
2349         be_msix_disable(adapter);
2350         return status;
2351 }
2352
2353 static int be_irq_register(struct be_adapter *adapter)
2354 {
2355         struct net_device *netdev = adapter->netdev;
2356         int status;
2357
2358         if (msix_enabled(adapter)) {
2359                 status = be_msix_register(adapter);
2360                 if (status == 0)
2361                         goto done;
2362                 /* INTx is not supported for VF */
2363                 if (!be_physfn(adapter))
2364                         return status;
2365         }
2366
2367         /* INTx: only the first EQ is used */
2368         netdev->irq = adapter->pdev->irq;
2369         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2370                              &adapter->eq_obj[0]);
2371         if (status) {
2372                 dev_err(&adapter->pdev->dev,
2373                         "INTx request IRQ failed - err %d\n", status);
2374                 return status;
2375         }
2376 done:
2377         adapter->isr_registered = true;
2378         return 0;
2379 }
2380
2381 static void be_irq_unregister(struct be_adapter *adapter)
2382 {
2383         struct net_device *netdev = adapter->netdev;
2384         struct be_eq_obj *eqo;
2385         int i;
2386
2387         if (!adapter->isr_registered)
2388                 return;
2389
2390         /* INTx */
2391         if (!msix_enabled(adapter)) {
2392                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2393                 goto done;
2394         }
2395
2396         /* MSIx */
2397         for_all_evt_queues(adapter, eqo, i)
2398                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2399
2400 done:
2401         adapter->isr_registered = false;
2402 }
2403
2404 static void be_rx_qs_destroy(struct be_adapter *adapter)
2405 {
2406         struct be_queue_info *q;
2407         struct be_rx_obj *rxo;
2408         int i;
2409
2410         for_all_rx_queues(adapter, rxo, i) {
2411                 q = &rxo->q;
2412                 if (q->created) {
2413                         be_cmd_rxq_destroy(adapter, q);
2414                         /* After the rxq is invalidated, wait for a grace time
2415                          * of 1ms for all dma to end and the flush compl to
2416                          * arrive
2417                          */
2418                         mdelay(1);
2419                         be_rx_cq_clean(rxo);
2420                 }
2421                 be_queue_free(adapter, q);
2422         }
2423 }
2424
2425 static int be_close(struct net_device *netdev)
2426 {
2427         struct be_adapter *adapter = netdev_priv(netdev);
2428         struct be_eq_obj *eqo;
2429         int i;
2430
2431         be_roce_dev_close(adapter);
2432
2433         if (!lancer_chip(adapter))
2434                 be_intr_set(adapter, false);
2435
2436         for_all_evt_queues(adapter, eqo, i)
2437                 napi_disable(&eqo->napi);
2438
2439         be_async_mcc_disable(adapter);
2440
2441         /* Wait for all pending tx completions to arrive so that
2442          * all tx skbs are freed.
2443          */
2444         be_tx_compl_clean(adapter);
2445
2446         be_rx_qs_destroy(adapter);
2447
2448         for_all_evt_queues(adapter, eqo, i) {
2449                 if (msix_enabled(adapter))
2450                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2451                 else
2452                         synchronize_irq(netdev->irq);
2453                 be_eq_clean(eqo);
2454         }
2455
2456         be_irq_unregister(adapter);
2457
2458         return 0;
2459 }
2460
2461 static int be_rx_qs_create(struct be_adapter *adapter)
2462 {
2463         struct be_rx_obj *rxo;
2464         int rc, i, j;
2465         u8 rsstable[128];
2466
2467         for_all_rx_queues(adapter, rxo, i) {
2468                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2469                                     sizeof(struct be_eth_rx_d));
2470                 if (rc)
2471                         return rc;
2472         }
2473
2474         /* The FW would like the default RXQ to be created first */
2475         rxo = default_rxo(adapter);
2476         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2477                                adapter->if_handle, false, &rxo->rss_id);
2478         if (rc)
2479                 return rc;
2480
2481         for_all_rss_queues(adapter, rxo, i) {
2482                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2483                                        rx_frag_size, adapter->if_handle,
2484                                        true, &rxo->rss_id);
2485                 if (rc)
2486                         return rc;
2487         }
2488
2489         if (be_multi_rxq(adapter)) {
2490                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2491                         for_all_rss_queues(adapter, rxo, i) {
2492                                 if ((j + i) >= 128)
2493                                         break;
2494                                 rsstable[j + i] = rxo->rss_id;
2495                         }
2496                 }
2497                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2498                 if (rc)
2499                         return rc;
2500         }
2501
2502         /* First time posting */
2503         for_all_rx_queues(adapter, rxo, i)
2504                 be_post_rx_frags(rxo, GFP_KERNEL);
2505         return 0;
2506 }
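/* Editor's note: an example of the RSS indirection fill above. With four
 * RSS rings the 128-entry table becomes a plain round-robin
 * [id0 id1 id2 id3 id0 id1 ...]; the (j + i) >= 128 check only matters
 * when the ring count does not divide 128 evenly (e.g. three rings leave
 * a 2-entry remainder that the inner break handles).
 */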
2507
2508 static int be_open(struct net_device *netdev)
2509 {
2510         struct be_adapter *adapter = netdev_priv(netdev);
2511         struct be_eq_obj *eqo;
2512         struct be_rx_obj *rxo;
2513         struct be_tx_obj *txo;
2514         u8 link_status;
2515         int status, i;
2516
2517         status = be_rx_qs_create(adapter);
2518         if (status)
2519                 goto err;
2520
2521         be_irq_register(adapter);
2522
2523         if (!lancer_chip(adapter))
2524                 be_intr_set(adapter, true);
2525
2526         for_all_rx_queues(adapter, rxo, i)
2527                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2528
2529         for_all_tx_queues(adapter, txo, i)
2530                 be_cq_notify(adapter, txo->cq.id, true, 0);
2531
2532         be_async_mcc_enable(adapter);
2533
2534         for_all_evt_queues(adapter, eqo, i) {
2535                 napi_enable(&eqo->napi);
2536                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2537         }
2538
2539         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2540         if (!status)
2541                 be_link_status_update(adapter, link_status);
2542
2543         be_roce_dev_open(adapter);
2544         return 0;
2545 err:
2546         be_close(adapter->netdev);
2547         return -EIO;
2548 }
2549
2550 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2551 {
2552         struct be_dma_mem cmd;
2553         int status = 0;
2554         u8 mac[ETH_ALEN];
2555
2556         memset(mac, 0, ETH_ALEN);
2557
2558         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2559         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2560                                     GFP_KERNEL);
2561         if (cmd.va == NULL)
2562                 return -ENOMEM;
2563         memset(cmd.va, 0, cmd.size);
2564
2565         if (enable) {
2566                 status = pci_write_config_dword(adapter->pdev,
2567                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2568                 if (status) {
2569                         dev_err(&adapter->pdev->dev,
2570                                 "Could not enable Wake-on-lan\n");
2571                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2572                                           cmd.dma);
2573                         return status;
2574                 }
2575                 status = be_cmd_enable_magic_wol(adapter,
2576                                 adapter->netdev->dev_addr, &cmd);
2577                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2578                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2579         } else {
2580                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2581                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2582                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2583         }
2584
2585         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2586         return status;
2587 }
2588
2589 /*
2590  * Generate a seed MAC address from the PF MAC address using jhash.
2591  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2592  * These addresses are programmed into the ASIC by the PF; the VF driver
2593  * queries for its MAC address during probe.
2594  */
2595 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2596 {
2597         u32 vf;
2598         int status = 0;
2599         u8 mac[ETH_ALEN];
2600         struct be_vf_cfg *vf_cfg;
2601
2602         be_vf_eth_addr_generate(adapter, mac);
2603
2604         for_all_vfs(adapter, vf_cfg, vf) {
2605                 if (lancer_chip(adapter)) {
2606                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2607                 } else {
2608                         status = be_cmd_pmac_add(adapter, mac,
2609                                                  vf_cfg->if_handle,
2610                                                  &vf_cfg->pmac_id, vf + 1);
2611                 }
2612
2613                 if (status)
2614                         dev_err(&adapter->pdev->dev,
2615                         "Mac address assignment failed for VF %d\n", vf);
2616                 else
2617                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2618
2619                 mac[5] += 1;
2620         }
2621         return status;
2622 }
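/* Editor's note: an example of the incremental assignment above. If the
 * jhash-derived seed were 02:00:00:aa:bb:00, VF0 would get ...:00, VF1
 * ...:01, and so on; only mac[5] is bumped, which quietly assumes num_vfs
 * stays well below the 0xff wrap of that octet.
 */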
2623
2624 static int be_vfs_mac_query(struct be_adapter *adapter)
2625 {
2626         int status, vf;
2627         u8 mac[ETH_ALEN];
2628         struct be_vf_cfg *vf_cfg;
2629         bool active;
2630
2631         for_all_vfs(adapter, vf_cfg, vf) {
2632                 be_cmd_get_mac_from_list(adapter, mac, &active,
2633                                          &vf_cfg->pmac_id, 0);
2634
2635                 status = be_cmd_mac_addr_query(adapter, mac, false,
2636                                                vf_cfg->if_handle, 0);
2637                 if (status)
2638                         return status;
2639                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2640         }
2641         return 0;
2642 }
2643
2644 static void be_vf_clear(struct be_adapter *adapter)
2645 {
2646         struct be_vf_cfg *vf_cfg;
2647         u32 vf;
2648
2649         if (be_find_vfs(adapter, ASSIGNED)) {
2650                 dev_warn(&adapter->pdev->dev,
2651                          "VFs are assigned to VMs: not disabling VFs\n");
2652                 goto done;
2653         }
2654
2655         for_all_vfs(adapter, vf_cfg, vf) {
2656                 if (lancer_chip(adapter))
2657                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2658                 else
2659                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2660                                         vf_cfg->pmac_id, vf + 1);
2661
2662                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2663         }
2664         pci_disable_sriov(adapter->pdev);
2665 done:
2666         kfree(adapter->vf_cfg);
2667         adapter->num_vfs = 0;
2668 }
2669
2670 static int be_clear(struct be_adapter *adapter)
2671 {
2672         int i = 1;
2673
2674         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2675                 cancel_delayed_work_sync(&adapter->work);
2676                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2677         }
2678
2679         if (sriov_enabled(adapter))
2680                 be_vf_clear(adapter);
2681
2682         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2683                 be_cmd_pmac_del(adapter, adapter->if_handle,
2684                         adapter->pmac_id[i], 0);
2685
2686         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2687
2688         be_mcc_queues_destroy(adapter);
2689         be_rx_cqs_destroy(adapter);
2690         be_tx_queues_destroy(adapter);
2691         be_evt_queues_destroy(adapter);
2692
2693         kfree(adapter->pmac_id);
2694         adapter->pmac_id = NULL;
2695
2696         be_msix_disable(adapter);
2697         return 0;
2698 }
2699
2700 static int be_vfs_if_create(struct be_adapter *adapter)
2701 {
2702         struct be_vf_cfg *vf_cfg;
2703         u32 cap_flags, en_flags, vf;
2704         int status = 0;
2705
2706         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2707                     BE_IF_FLAGS_MULTICAST;
2708
2709         for_all_vfs(adapter, vf_cfg, vf) {
2710                 if (!BE3_chip(adapter))
2711                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2712
2713                 /* If a FW profile exists, then cap_flags are updated */
2714                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2715                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2716                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2717                                           &vf_cfg->if_handle, vf + 1);
2718                 if (status)
2719                         goto err;
2720         }
2721 err:
2722         return status;
2723 }
2724
2725 static int be_vf_setup_init(struct be_adapter *adapter)
2726 {
2727         struct be_vf_cfg *vf_cfg;
2728         int vf;
2729
2730         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2731                                   GFP_KERNEL);
2732         if (!adapter->vf_cfg)
2733                 return -ENOMEM;
2734
2735         for_all_vfs(adapter, vf_cfg, vf) {
2736                 vf_cfg->if_handle = -1;
2737                 vf_cfg->pmac_id = -1;
2738         }
2739         return 0;
2740 }
2741
2742 static int be_vf_setup(struct be_adapter *adapter)
2743 {
2744         struct be_vf_cfg *vf_cfg;
2745         u16 def_vlan, lnk_speed;
2746         int status, old_vfs, vf;
2747         struct device *dev = &adapter->pdev->dev;
2748
2749         old_vfs = be_find_vfs(adapter, ENABLED);
2750         if (old_vfs) {
2751                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2752                 if (old_vfs != num_vfs)
2753                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2754                 adapter->num_vfs = old_vfs;
2755         } else {
2756                 if (num_vfs > adapter->dev_num_vfs)
2757                         dev_info(dev, "Device supports %d VFs and not %d\n",
2758                                  adapter->dev_num_vfs, num_vfs);
2759                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2760
2761                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2762                 if (status) {
2763                         dev_err(dev, "SRIOV enable failed\n");
2764                         adapter->num_vfs = 0;
2765                         return 0;
2766                 }
2767         }
2768
2769         status = be_vf_setup_init(adapter);
2770         if (status)
2771                 goto err;
2772
2773         if (old_vfs) {
2774                 for_all_vfs(adapter, vf_cfg, vf) {
2775                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2776                         if (status)
2777                                 goto err;
2778                 }
2779         } else {
2780                 status = be_vfs_if_create(adapter);
2781                 if (status)
2782                         goto err;
2783         }
2784
2785         if (old_vfs) {
2786                 status = be_vfs_mac_query(adapter);
2787                 if (status)
2788                         goto err;
2789         } else {
2790                 status = be_vf_eth_addr_config(adapter);
2791                 if (status)
2792                         goto err;
2793         }
2794
2795         for_all_vfs(adapter, vf_cfg, vf) {
2796                 /* BE3 FW, by default, caps VF TX-rate to 100Mbps.
2797                  * Allow full available bandwidth.
2798                  */
2799                 if (BE3_chip(adapter) && !old_vfs)
2800                         be_cmd_set_qos(adapter, 1000, vf+1);
2801
2802                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2803                                                   NULL, vf + 1);
2804                 if (!status)
2805                         vf_cfg->tx_rate = lnk_speed;
2806
2807                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2808                                                vf + 1, vf_cfg->if_handle);
2809                 if (status)
2810                         goto err;
2811                 vf_cfg->def_vid = def_vlan;
2812
2813                 be_cmd_enable_vf(adapter, vf + 1);
2814         }
2815         return 0;
2816 err:
2817         dev_err(dev, "VF setup failed\n");
2818         be_vf_clear(adapter);
2819         return status;
2820 }
2821
2822 static void be_setup_init(struct be_adapter *adapter)
2823 {
2824         adapter->vlan_prio_bmap = 0xff;
2825         adapter->phy.link_speed = -1;
2826         adapter->if_handle = -1;
2827         adapter->be3_native = false;
2828         adapter->promiscuous = false;
2829         if (be_physfn(adapter))
2830                 adapter->cmd_privileges = MAX_PRIVILEGES;
2831         else
2832                 adapter->cmd_privileges = MIN_PRIVILEGES;
2833 }
2834
2835 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2836                            bool *active_mac, u32 *pmac_id)
2837 {
2838         int status = 0;
2839
2840         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2841                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2842                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2843                         *active_mac = true;
2844                 else
2845                         *active_mac = false;
2846
2847                 return status;
2848         }
2849
2850         if (lancer_chip(adapter)) {
2851                 status = be_cmd_get_mac_from_list(adapter, mac,
2852                                                   active_mac, pmac_id, 0);
2853                 if (*active_mac) {
2854                         status = be_cmd_mac_addr_query(adapter, mac, false,
2855                                                        if_handle, *pmac_id);
2856                 }
2857         } else if (be_physfn(adapter)) {
2858                 /* For BE3, for PF get permanent MAC */
2859                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2860                 *active_mac = false;
2861         } else {
2862                 /* For BE3, for VF get soft MAC assigned by PF */
2863                 status = be_cmd_mac_addr_query(adapter, mac, false,
2864                                                if_handle, 0);
2865                 *active_mac = true;
2866         }
2867         return status;
2868 }
2869
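/* Populate the per-function resource limits: from the FW profile
 * (GET_FUNC_CONFIG) on Skyhawk/Lancer, or from fixed driver defaults
 * on BE2/BE3. Also reads the SR-IOV TotalVFs value from PCI config
 * space so VF setup knows how many VFs the device supports.
 */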
2870 static void be_get_resources(struct be_adapter *adapter)
2871 {
2872         u16 dev_num_vfs;
2873         int pos, status;
2874         bool profile_present = false;
2875
2876         if (!BEx_chip(adapter)) {
2877                 status = be_cmd_get_func_config(adapter);
2878                 if (!status)
2879                         profile_present = true;
2880         }
2881
2882         if (profile_present) {
2883                 /* Sanity fixes for Lancer */
2884                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2885                                               BE_UC_PMAC_COUNT);
2886                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2887                                            BE_NUM_VLANS_SUPPORTED);
2888                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2889                                                BE_MAX_MC);
2890                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2891                                                MAX_TX_QS);
2892                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2893                                                 BE3_MAX_RSS_QS);
2894                 adapter->max_event_queues = min_t(u16,
2895                                                   adapter->max_event_queues,
2896                                                   BE3_MAX_RSS_QS);
2897
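                /* Keep one RX ring as the default (non-RSS) queue */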
2898                 if (adapter->max_rss_queues &&
2899                     adapter->max_rss_queues == adapter->max_rx_queues)
2900                         adapter->max_rss_queues -= 1;
2901
2902                 if (adapter->max_event_queues < adapter->max_rss_queues)
2903                         adapter->max_rss_queues = adapter->max_event_queues;
2904
2905         } else {
2906                 if (be_physfn(adapter))
2907                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2908                 else
2909                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2910
2911                 if (adapter->function_mode & FLEX10_MODE)
2912                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2913                 else
2914                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2915
2916                 adapter->max_mcast_mac = BE_MAX_MC;
2917                 adapter->max_tx_queues = MAX_TX_QS;
2918                 adapter->max_rss_queues = (adapter->be3_native) ?
2919                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2920                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2921
2922                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2923                                         BE_IF_FLAGS_BROADCAST |
2924                                         BE_IF_FLAGS_MULTICAST |
2925                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2926                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2927                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2928                                         BE_IF_FLAGS_PROMISCUOUS;
2929
2930                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2931                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2932         }
2933
2934         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2935         if (pos) {
2936                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2937                                      &dev_num_vfs);
2938                 if (BE3_chip(adapter))
2939                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2940                 adapter->dev_num_vfs = dev_num_vfs;
2941         }
2942 }
2943
2944 /* Routine to query per function resource limits */
2945 static int be_get_config(struct be_adapter *adapter)
2946 {
2947         int status;
2948
2949         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2950                                      &adapter->function_mode,
2951                                      &adapter->function_caps);
2952         if (status)
2953                 goto err;
2954
2955         be_get_resources(adapter);
2956
2957         /* primary mac needs 1 pmac entry */
2958         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2959                                    sizeof(u32), GFP_KERNEL);
2960         if (!adapter->pmac_id) {
2961                 status = -ENOMEM;
2962                 goto err;
2963         }
2964
2965 err:
2966         return status;
2967 }
2968
2969 static int be_setup(struct be_adapter *adapter)
2970 {
2971         struct device *dev = &adapter->pdev->dev;
2972         u32 en_flags;
2973         u32 tx_fc, rx_fc;
2974         int status;
2975         u8 mac[ETH_ALEN];
2976         bool active_mac;
2977
2978         be_setup_init(adapter);
2979
2980         if (!lancer_chip(adapter))
2981                 be_cmd_req_native_mode(adapter);
2982
2983         status = be_get_config(adapter);
2984         if (status)
2985                 goto err;
2986
2987         be_msix_enable(adapter);
2988
2989         status = be_evt_queues_create(adapter);
2990         if (status)
2991                 goto err;
2992
2993         status = be_tx_cqs_create(adapter);
2994         if (status)
2995                 goto err;
2996
2997         status = be_rx_cqs_create(adapter);
2998         if (status)
2999                 goto err;
3000
3001         status = be_mcc_queues_create(adapter);
3002         if (status)
3003                 goto err;
3004
3005         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3006         /* In UMC mode FW does not return right privileges.
3007          * Override with correct privilege equivalent to PF.
3008          */
3009         if (be_is_mc(adapter))
3010                 adapter->cmd_privileges = MAX_PRIVILEGES;
3011
3012         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3013                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3014
3015         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3016                 en_flags |= BE_IF_FLAGS_RSS;
3017
3018         en_flags &= adapter->if_cap_flags;
3019
3020         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3021                                   &adapter->if_handle, 0);
3022         if (status != 0)
3023                 goto err;
3024
3025         memset(mac, 0, ETH_ALEN);
3026         active_mac = false;
3027         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3028                                  &active_mac, &adapter->pmac_id[0]);
3029         if (status != 0)
3030                 goto err;
3031
3032         if (!active_mac) {
3033                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3034                                          &adapter->pmac_id[0], 0);
3035                 if (status != 0)
3036                         goto err;
3037         }
3038
3039         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3040                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3041                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3042         }
3043
3044         status = be_tx_qs_create(adapter);
3045         if (status)
3046                 goto err;
3047
3048         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3049
3050         if (adapter->vlans_added)
3051                 be_vid_config(adapter);
3052
3053         be_set_rx_mode(adapter->netdev);
3054
3055         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3056
3057         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3058                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3059                                         adapter->rx_fc);
3060
3061         if (be_physfn(adapter) && num_vfs) {
3062                 if (adapter->dev_num_vfs)
3063                         be_vf_setup(adapter);
3064                 else
3065                         dev_warn(dev, "device doesn't support SRIOV\n");
3066         }
3067
3068         status = be_cmd_get_phy_info(adapter);
3069         if (!status && be_pause_supported(adapter))
3070                 adapter->phy.fc_autoneg = 1;
3071
3072         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3073         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3074         return 0;
3075 err:
3076         be_clear(adapter);
3077         return status;
3078 }
3079
3080 #ifdef CONFIG_NET_POLL_CONTROLLER
3081 static void be_netpoll(struct net_device *netdev)
3082 {
3083         struct be_adapter *adapter = netdev_priv(netdev);
3084         struct be_eq_obj *eqo;
3085         int i;
3086
3087         for_all_evt_queues(adapter, eqo, i) {
3088                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3089                 napi_schedule(&eqo->napi);
3090         }
3093 }
3094 #endif
3095
3096 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3097 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3098
3099 static bool be_flash_redboot(struct be_adapter *adapter,
3100                         const u8 *p, u32 img_start, int image_size,
3101                         int hdr_size)
3102 {
3103         u32 crc_offset;
3104         u8 flashed_crc[4];
3105         int status;
3106
3107         crc_offset = hdr_size + img_start + image_size - 4;
3108
3109         p += crc_offset;
3110
3111         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3112                         (image_size - 4));
3113         if (status) {
3114                 dev_err(&adapter->pdev->dev,
3115                         "could not get crc from flash, not flashing redboot\n");
3116                 return false;
3117         }
3118
3119         /* update redboot only if crc does not match */
3120         if (!memcmp(flashed_crc, p, 4))
3121                 return false;
3122         else
3123                 return true;
3124 }
3125
3126 static bool phy_flashing_required(struct be_adapter *adapter)
3127 {
3128         return (adapter->phy.phy_type == TN_8022 &&
3129                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3130 }
3131
3132 static bool is_comp_in_ufi(struct be_adapter *adapter,
3133                            struct flash_section_info *fsec, int type)
3134 {
3135         int i = 0, img_type = 0;
3136         struct flash_section_info_g2 *fsec_g2 = NULL;
3137
3138         if (BE2_chip(adapter))
3139                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3140
3141         for (i = 0; i < MAX_FLASH_COMP; i++) {
3142                 if (fsec_g2)
3143                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3144                 else
3145                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3146
3147                 if (img_type == type)
3148                         return true;
3149         }
3150         return false;
3152 }
3153
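/* Scan the UFI image past the file and image headers, in 32-byte
 * steps, for the flash-directory cookie; returns NULL if the section
 * table cannot be found.
 */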
3154 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3155                                                 int header_size,
3156                                                 const struct firmware *fw)
3157 {
3158         struct flash_section_info *fsec = NULL;
3159         const u8 *p = fw->data;
3160
3161         p += header_size;
3162         while (p < (fw->data + fw->size)) {
3163                 fsec = (struct flash_section_info *)p;
3164                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3165                         return fsec;
3166                 p += 32;
3167         }
3168         return NULL;
3169 }
3170
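/* Write one flash component in 32KB chunks. Intermediate chunks are
 * sent with a SAVE op (buffered by FW) and the final chunk with a
 * FLASH op, which commits the image. A PHY-FW write rejected with
 * ILLEGAL_IOCTL_REQ is silently skipped, likely because older FW
 * does not support PHY flashing.
 */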
3171 static int be_flash(struct be_adapter *adapter, const u8 *img,
3172                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3173 {
3174         u32 total_bytes = 0, flash_op, num_bytes = 0;
3175         int status = 0;
3176         struct be_cmd_write_flashrom *req = flash_cmd->va;
3177
3178         total_bytes = img_size;
3179         while (total_bytes) {
3180                 num_bytes = min_t(u32, 32*1024, total_bytes);
3181
3182                 total_bytes -= num_bytes;
3183
3184                 if (!total_bytes) {
3185                         if (optype == OPTYPE_PHY_FW)
3186                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3187                         else
3188                                 flash_op = FLASHROM_OPER_FLASH;
3189                 } else {
3190                         if (optype == OPTYPE_PHY_FW)
3191                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3192                         else
3193                                 flash_op = FLASHROM_OPER_SAVE;
3194                 }
3195
3196                 memcpy(req->data_buf, img, num_bytes);
3197                 img += num_bytes;
3198                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3199                                                 flash_op, num_bytes);
3200                 if (status) {
3201                         if (status == ILLEGAL_IOCTL_REQ &&
3202                             optype == OPTYPE_PHY_FW)
3203                                 break;
3204                         dev_err(&adapter->pdev->dev,
3205                                 "cmd to write to flash rom failed.\n");
3206                         return status;
3207                 }
3208         }
3209         return 0;
3210 }
3211
3212 /* For BE2 and BE3 */
3213 static int be_flash_BEx(struct be_adapter *adapter,
3214                          const struct firmware *fw,
3215                          struct be_dma_mem *flash_cmd,
3216                          int num_of_images)
3218 {
3219         int status = 0, i, filehdr_size = 0;
3220         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3221         const u8 *p = fw->data;
3222         const struct flash_comp *pflashcomp;
3223         int num_comp, redboot;
3224         struct flash_section_info *fsec = NULL;
3225
3226         static const struct flash_comp gen3_flash_types[] = {
3227                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3228                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3229                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3230                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3231                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3232                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3233                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3234                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3235                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3236                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3237                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3238                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3239                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3240                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3241                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3242                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3243                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3244                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3245                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3246                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3247         };
3248
3249         static const struct flash_comp gen2_flash_types[] = {
3250                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3251                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3252                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3253                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3254                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3255                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3256                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3257                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3258                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3259                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3260                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3261                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3262                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3263                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3264                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3265                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3266         };
3267
3268         if (BE3_chip(adapter)) {
3269                 pflashcomp = gen3_flash_types;
3270                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3271                 num_comp = ARRAY_SIZE(gen3_flash_types);
3272         } else {
3273                 pflashcomp = gen2_flash_types;
3274                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3275                 num_comp = ARRAY_SIZE(gen2_flash_types);
3276         }
3277
3278         /* Get flash section info */
3279         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3280         if (!fsec) {
3281                 dev_err(&adapter->pdev->dev,
3282                         "Invalid Cookie. UFI corrupted?\n");
3283                 return -1;
3284         }
3285         for (i = 0; i < num_comp; i++) {
3286                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3287                         continue;
3288
3289                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3290                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3291                         continue;
3292
3293                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3294                     !phy_flashing_required(adapter))
3295                         continue;
3296
3297                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3298                         redboot = be_flash_redboot(adapter, fw->data,
3299                                 pflashcomp[i].offset, pflashcomp[i].size,
3300                                 filehdr_size + img_hdrs_size);
3301                         if (!redboot)
3302                                 continue;
3303                 }
3304
3305                 p = fw->data;
3306                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3307                 if (p + pflashcomp[i].size > fw->data + fw->size)
3308                         return -1;
3309
3310                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3311                                         pflashcomp[i].size);
3312                 if (status) {
3313                         dev_err(&adapter->pdev->dev,
3314                                 "Flashing section type %d failed.\n",
3315                                 pflashcomp[i].img_type);
3316                         return status;
3317                 }
3318         }
3319         return 0;
3320 }
3321
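/* Skyhawk UFIs carry a generic section table: map each section type
 * to a flash optype and flash it, re-writing the boot code only when
 * its CRC differs from what is already in flash.
 */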
3322 static int be_flash_skyhawk(struct be_adapter *adapter,
3323                 const struct firmware *fw,
3324                 struct be_dma_mem *flash_cmd, int num_of_images)
3325 {
3326         int status = 0, i, filehdr_size = 0;
3327         int img_offset, img_size, img_optype, redboot;
3328         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3329         const u8 *p = fw->data;
3330         struct flash_section_info *fsec = NULL;
3331
3332         filehdr_size = sizeof(struct flash_file_hdr_g3);
3333         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3334         if (!fsec) {
3335                 dev_err(&adapter->pdev->dev,
3336                         "Invalid Cookie. UFI corrupted?\n");
3337                 return -1;
3338         }
3339
3340         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3341                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3342                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3343
3344                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3345                 case IMAGE_FIRMWARE_iSCSI:
3346                         img_optype = OPTYPE_ISCSI_ACTIVE;
3347                         break;
3348                 case IMAGE_BOOT_CODE:
3349                         img_optype = OPTYPE_REDBOOT;
3350                         break;
3351                 case IMAGE_OPTION_ROM_ISCSI:
3352                         img_optype = OPTYPE_BIOS;
3353                         break;
3354                 case IMAGE_OPTION_ROM_PXE:
3355                         img_optype = OPTYPE_PXE_BIOS;
3356                         break;
3357                 case IMAGE_OPTION_ROM_FCoE:
3358                         img_optype = OPTYPE_FCOE_BIOS;
3359                         break;
3360                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3361                         img_optype = OPTYPE_ISCSI_BACKUP;
3362                         break;
3363                 case IMAGE_NCSI:
3364                         img_optype = OPTYPE_NCSI_FW;
3365                         break;
3366                 default:
3367                         continue;
3368                 }
3369
3370                 if (img_optype == OPTYPE_REDBOOT) {
3371                         redboot = be_flash_redboot(adapter, fw->data,
3372                                         img_offset, img_size,
3373                                         filehdr_size + img_hdrs_size);
3374                         if (!redboot)
3375                                 continue;
3376                 }
3377
3378                 p = fw->data;
3379                 p += filehdr_size + img_offset + img_hdrs_size;
3380                 if (p + img_size > fw->data + fw->size)
3381                         return -1;
3382
3383                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3384                 if (status) {
3385                         dev_err(&adapter->pdev->dev,
3386                                 "Flashing section type %d failed.\n",
3387                                 le32_to_cpu(fsec->fsec_entry[i].type));
3388                         return status;
3389                 }
3390         }
3391         return 0;
3392 }
3393
3394 static int lancer_wait_idle(struct be_adapter *adapter)
3395 {
3396 #define SLIPORT_IDLE_TIMEOUT 30
3397         u32 reg_val;
3398         int status = 0, i;
3399
3400         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3401                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3402                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3403                         break;
3404
3405                 ssleep(1);
3406         }
3407
3408         if (i == SLIPORT_IDLE_TIMEOUT)
3409                 status = -1;
3410
3411         return status;
3412 }
3413
3414 static int lancer_fw_reset(struct be_adapter *adapter)
3415 {
3416         int status = 0;
3417
3418         status = lancer_wait_idle(adapter);
3419         if (status)
3420                 return status;
3421
3422         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3423                   PHYSDEV_CONTROL_OFFSET);
3424
3425         return status;
3426 }
3427
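/* Lancer FW download: stream the image in 32KB chunks to the "/prg"
 * object via WRITE_OBJECT commands, then issue a zero-length write at
 * the final offset to commit it. change_status reports whether the
 * new image needs a FW reset or a full reboot to become active.
 */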
3428 static int lancer_fw_download(struct be_adapter *adapter,
3429                                 const struct firmware *fw)
3430 {
3431 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3432 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3433         struct be_dma_mem flash_cmd;
3434         const u8 *data_ptr = NULL;
3435         u8 *dest_image_ptr = NULL;
3436         size_t image_size = 0;
3437         u32 chunk_size = 0;
3438         u32 data_written = 0;
3439         u32 offset = 0;
3440         int status = 0;
3441         u8 add_status = 0;
3442         u8 change_status;
3443
3444         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3445                 dev_err(&adapter->pdev->dev,
3446                         "FW Image not properly aligned. "
3447                         "Length must be 4-byte aligned.\n");
3448                 status = -EINVAL;
3449                 goto lancer_fw_exit;
3450         }
3451
3452         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3453                                 + LANCER_FW_DOWNLOAD_CHUNK;
3454         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3455                                                 &flash_cmd.dma, GFP_KERNEL);
3456         if (!flash_cmd.va) {
3457                 status = -ENOMEM;
3458                 dev_err(&adapter->pdev->dev,
3459                         "Memory allocation failure while flashing\n");
3460                 goto lancer_fw_exit;
3461         }
3462
3463         dest_image_ptr = flash_cmd.va +
3464                                 sizeof(struct lancer_cmd_req_write_object);
3465         image_size = fw->size;
3466         data_ptr = fw->data;
3467
3468         while (image_size) {
3469                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3470
3471                 /* Copy the image chunk content. */
3472                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3473
3474                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3475                                                  chunk_size, offset,
3476                                                  LANCER_FW_DOWNLOAD_LOCATION,
3477                                                  &data_written, &change_status,
3478                                                  &add_status);
3479                 if (status)
3480                         break;
3481
3482                 offset += data_written;
3483                 data_ptr += data_written;
3484                 image_size -= data_written;
3485         }
3486
3487         if (!status) {
3488                 /* Commit the FW written */
3489                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3490                                                  0, offset,
3491                                                  LANCER_FW_DOWNLOAD_LOCATION,
3492                                                  &data_written, &change_status,
3493                                                  &add_status);
3494         }
3495
3496         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3497                                 flash_cmd.dma);
3498         if (status) {
3499                 dev_err(&adapter->pdev->dev,
3500                         "Firmware load error. "
3501                         "Status code: 0x%x Additional Status: 0x%x\n",
3502                         status, add_status);
3503                 goto lancer_fw_exit;
3504         }
3505
3506         if (change_status == LANCER_FW_RESET_NEEDED) {
3507                 status = lancer_fw_reset(adapter);
3508                 if (status) {
3509                         dev_err(&adapter->pdev->dev,
3510                                 "Adapter busy for FW reset.\n"
3511                                 "New FW will not be active.\n");
3512                         goto lancer_fw_exit;
3513                 }
3514         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3515                 dev_err(&adapter->pdev->dev,
3516                         "System reboot required for new FW"
3517                         " to be active\n");
3518         }
3519
3520         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3521 lancer_fw_exit:
3522         return status;
3523 }
3524
3525 #define UFI_TYPE2               2
3526 #define UFI_TYPE3               3
3527 #define UFI_TYPE4               4
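/* The first character of the UFI build string encodes the ASIC
 * generation the image was built for: '2' => BE2, '3' => BE3,
 * '4' => Skyhawk.
 */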
3528 static int be_get_ufi_type(struct be_adapter *adapter,
3529                            struct flash_file_hdr_g2 *fhdr)
3530 {
3531         if (fhdr == NULL)
3532                 goto be_get_ufi_exit;
3533
3534         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3535                 return UFI_TYPE4;
3536         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3537                 return UFI_TYPE3;
3538         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3539                 return UFI_TYPE2;
3540
3541 be_get_ufi_exit:
3542         dev_err(&adapter->pdev->dev,
3543                 "UFI and Interface are not compatible for flashing\n");
3544         return -1;
3545 }
3546
3547 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3548 {
3549         struct flash_file_hdr_g2 *fhdr;
3550         struct flash_file_hdr_g3 *fhdr3;
3551         struct image_hdr *img_hdr_ptr = NULL;
3552         struct be_dma_mem flash_cmd;
3553         const u8 *p;
3554         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3555
3556         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3557         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3558                                           &flash_cmd.dma, GFP_KERNEL);
3559         if (!flash_cmd.va) {
3560                 status = -ENOMEM;
3561                 dev_err(&adapter->pdev->dev,
3562                         "Memory allocation failure while flashing\n");
3563                 goto be_fw_exit;
3564         }
3565
3566         p = fw->data;
3567         fhdr = (struct flash_file_hdr_g2 *)p;
3568
3569         ufi_type = be_get_ufi_type(adapter, fhdr);
3570
3571         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3572         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3573         for (i = 0; i < num_imgs; i++) {
3574                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3575                                 (sizeof(struct flash_file_hdr_g3) +
3576                                  i * sizeof(struct image_hdr)));
3577                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3578                         if (ufi_type == UFI_TYPE4)
3579                                 status = be_flash_skyhawk(adapter, fw,
3580                                                         &flash_cmd, num_imgs);
3581                         else if (ufi_type == UFI_TYPE3)
3582                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3583                                                       num_imgs);
3584                 }
3585         }
3586
3587         if (ufi_type == UFI_TYPE2)
3588                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3589         else if (ufi_type == -1)
3590                 status = -1;
3591
3592         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3593                           flash_cmd.dma);
3594         if (status) {
3595                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3596                 goto be_fw_exit;
3597         }
3598
3599         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3600
3601 be_fw_exit:
3602         return status;
3603 }
3604
3605 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3606 {
3607         const struct firmware *fw;
3608         int status;
3609
3610         if (!netif_running(adapter->netdev)) {
3611                 dev_err(&adapter->pdev->dev,
3612                         "Firmware load not allowed (interface is down)\n");
3613                 return -1;
3614         }
3615
3616         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3617         if (status)
3618                 goto fw_exit;
3619
3620         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3621
3622         if (lancer_chip(adapter))
3623                 status = lancer_fw_download(adapter, fw);
3624         else
3625                 status = be_fw_download(adapter, fw);
3626
3627 fw_exit:
3628         release_firmware(fw);
3629         return status;
3630 }
3631
3632 static const struct net_device_ops be_netdev_ops = {
3633         .ndo_open               = be_open,
3634         .ndo_stop               = be_close,
3635         .ndo_start_xmit         = be_xmit,
3636         .ndo_set_rx_mode        = be_set_rx_mode,
3637         .ndo_set_mac_address    = be_mac_addr_set,
3638         .ndo_change_mtu         = be_change_mtu,
3639         .ndo_get_stats64        = be_get_stats64,
3640         .ndo_validate_addr      = eth_validate_addr,
3641         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3642         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3643         .ndo_set_vf_mac         = be_set_vf_mac,
3644         .ndo_set_vf_vlan        = be_set_vf_vlan,
3645         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3646         .ndo_get_vf_config      = be_get_vf_config,
3647 #ifdef CONFIG_NET_POLL_CONTROLLER
3648         .ndo_poll_controller    = be_netpoll,
3649 #endif
3650 };
3651
3652 static void be_netdev_init(struct net_device *netdev)
3653 {
3654         struct be_adapter *adapter = netdev_priv(netdev);
3655         struct be_eq_obj *eqo;
3656         int i;
3657
3658         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3659                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3660                 NETIF_F_HW_VLAN_TX;
3661         if (be_multi_rxq(adapter))
3662                 netdev->hw_features |= NETIF_F_RXHASH;
3663
3664         netdev->features |= netdev->hw_features |
3665                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3666
3667         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3668                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3669
3670         netdev->priv_flags |= IFF_UNICAST_FLT;
3671
3672         netdev->flags |= IFF_MULTICAST;
3673
3674         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3675
3676         netdev->netdev_ops = &be_netdev_ops;
3677
3678         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3679
3680         for_all_evt_queues(adapter, eqo, i)
3681                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3682 }
3683
3684 static void be_unmap_pci_bars(struct be_adapter *adapter)
3685 {
3686         if (adapter->csr)
3687                 pci_iounmap(adapter->pdev, adapter->csr);
3688         if (adapter->db)
3689                 pci_iounmap(adapter->pdev, adapter->db);
3690 }
3691
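/* Doorbell BAR: Lancer and VFs expose doorbells in BAR 0; other PFs
 * use BAR 4.
 */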
3692 static int db_bar(struct be_adapter *adapter)
3693 {
3694         if (lancer_chip(adapter) || !be_physfn(adapter))
3695                 return 0;
3696         else
3697                 return 4;
3698 }
3699
3700 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3701 {
3702         if (skyhawk_chip(adapter)) {
3703                 adapter->roce_db.size = 4096;
3704                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3705                                                               db_bar(adapter));
3706                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3707                                                                db_bar(adapter));
3708         }
3709         return 0;
3710 }
3711
3712 static int be_map_pci_bars(struct be_adapter *adapter)
3713 {
3714         u8 __iomem *addr;
3715         u32 sli_intf;
3716
3717         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3718         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3719                                 SLI_INTF_IF_TYPE_SHIFT;
3720
3721         if (BEx_chip(adapter) && be_physfn(adapter)) {
3722                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3723                 if (adapter->csr == NULL)
3724                         return -ENOMEM;
3725         }
3726
3727         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3728         if (addr == NULL)
3729                 goto pci_map_err;
3730         adapter->db = addr;
3731
3732         be_roce_map_pci_bars(adapter);
3733         return 0;
3734
3735 pci_map_err:
3736         be_unmap_pci_bars(adapter);
3737         return -ENOMEM;
3738 }
3739
3740 static void be_ctrl_cleanup(struct be_adapter *adapter)
3741 {
3742         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3743
3744         be_unmap_pci_bars(adapter);
3745
3746         if (mem->va)
3747                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3748                                   mem->dma);
3749
3750         mem = &adapter->rx_filter;
3751         if (mem->va)
3752                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3753                                   mem->dma);
3754 }
3755
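/* Map the PCI BARs and allocate the DMA buffers needed to talk to FW:
 * the bootstrap mailbox (16-byte aligned within an over-sized
 * allocation) and the rx_filter command buffer.
 */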
3756 static int be_ctrl_init(struct be_adapter *adapter)
3757 {
3758         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3759         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3760         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3761         u32 sli_intf;
3762         int status;
3763
3764         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3765         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3766                                  SLI_INTF_FAMILY_SHIFT;
3767         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3768
3769         status = be_map_pci_bars(adapter);
3770         if (status)
3771                 goto done;
3772
3773         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3774         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3775                                                 mbox_mem_alloc->size,
3776                                                 &mbox_mem_alloc->dma,
3777                                                 GFP_KERNEL);
3778         if (!mbox_mem_alloc->va) {
3779                 status = -ENOMEM;
3780                 goto unmap_pci_bars;
3781         }
3782         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3783         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3784         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3785         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3786
3787         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3788         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3789                                         &rx_filter->dma, GFP_KERNEL);
3790         if (rx_filter->va == NULL) {
3791                 status = -ENOMEM;
3792                 goto free_mbox;
3793         }
3794         memset(rx_filter->va, 0, rx_filter->size);
3795         mutex_init(&adapter->mbox_lock);
3796         spin_lock_init(&adapter->mcc_lock);
3797         spin_lock_init(&adapter->mcc_cq_lock);
3798
3799         init_completion(&adapter->flash_compl);
3800         pci_save_state(adapter->pdev);
3801         return 0;
3802
3803 free_mbox:
3804         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3805                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3806
3807 unmap_pci_bars:
3808         be_unmap_pci_bars(adapter);
3809
3810 done:
3811         return status;
3812 }
3813
3814 static void be_stats_cleanup(struct be_adapter *adapter)
3815 {
3816         struct be_dma_mem *cmd = &adapter->stats_cmd;
3817
3818         if (cmd->va)
3819                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3820                                   cmd->va, cmd->dma);
3821 }
3822
3823 static int be_stats_init(struct be_adapter *adapter)
3824 {
3825         struct be_dma_mem *cmd = &adapter->stats_cmd;
3826
3827         if (lancer_chip(adapter))
3828                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3829         else if (BE2_chip(adapter))
3830                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3831         else
3832                 /* BE3 and Skyhawk */
3833                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3834
3835         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3836                                      GFP_KERNEL);
3837         if (cmd->va == NULL)
3838                 return -1;
3839         memset(cmd->va, 0, cmd->size);
3840         return 0;
3841 }
3842
3843 static void be_remove(struct pci_dev *pdev)
3844 {
3845         struct be_adapter *adapter = pci_get_drvdata(pdev);
3846
3847         if (!adapter)
3848                 return;
3849
3850         be_roce_dev_remove(adapter);
3851
3852         cancel_delayed_work_sync(&adapter->func_recovery_work);
3853
3854         unregister_netdev(adapter->netdev);
3855
3856         be_clear(adapter);
3857
3858         /* tell fw we're done with firing cmds */
3859         be_cmd_fw_clean(adapter);
3860
3861         be_stats_cleanup(adapter);
3862
3863         be_ctrl_cleanup(adapter);
3864
3865         pci_disable_pcie_error_reporting(pdev);
3866
3867         pci_set_drvdata(pdev, NULL);
3868         pci_release_regions(pdev);
3869         pci_disable_device(pdev);
3870
3871         free_netdev(adapter->netdev);
3872 }
3873
3874 bool be_is_wol_supported(struct be_adapter *adapter)
3875 {
3876         return (adapter->wol_cap & BE_WOL_CAP) &&
3877                 !be_is_wol_excluded(adapter);
3878 }
3879
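/* Read the FW's UART trace level from the extended FAT capabilities;
 * used below to choose a default msg_enable for the interface.
 */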
3880 u32 be_get_fw_log_level(struct be_adapter *adapter)
3881 {
3882         struct be_dma_mem extfat_cmd;
3883         struct be_fat_conf_params *cfgs;
3884         int status;
3885         u32 level = 0;
3886         int j;
3887
3888         if (lancer_chip(adapter))
3889                 return 0;
3890
3891         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3892         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3893         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3894                                              &extfat_cmd.dma);
3895
3896         if (!extfat_cmd.va) {
3897                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3898                         __func__);
3899                 goto err;
3900         }
3901
3902         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3903         if (!status) {
3904                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3905                                                 sizeof(struct be_cmd_resp_hdr));
3906                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3907                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3908                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3909                 }
3910         }
3911         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3912                             extfat_cmd.dma);
3913 err:
3914         return level;
3915 }
3916
3917 static int be_get_initial_config(struct be_adapter *adapter)
3918 {
3919         int status;
3920         u32 level;
3921
3922         status = be_cmd_get_cntl_attributes(adapter);
3923         if (status)
3924                 return status;
3925
3926         status = be_cmd_get_acpi_wol_cap(adapter);
3927         if (status) {
3928                 /* in case of a failure to get wol capabilities
3929                  * check the exclusion list to determine WOL capability */
3930                 if (!be_is_wol_excluded(adapter))
3931                         adapter->wol_cap |= BE_WOL_CAP;
3932         }
3933
3934         if (be_is_wol_supported(adapter))
3935                 adapter->wol = true;
3936
3937         /* Must be a power of 2 or else MODULO will BUG_ON */
3938         adapter->be_get_temp_freq = 64;
3939
3940         level = be_get_fw_log_level(adapter);
3941         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3942
3943         return 0;
3944 }
3945
3946 static int lancer_recover_func(struct be_adapter *adapter)
3947 {
3948         int status;
3949
3950         status = lancer_test_and_set_rdy_state(adapter);
3951         if (status)
3952                 goto err;
3953
3954         if (netif_running(adapter->netdev))
3955                 be_close(adapter->netdev);
3956
3957         be_clear(adapter);
3958
3959         adapter->hw_error = false;
3960         adapter->fw_timeout = false;
3961
3962         status = be_setup(adapter);
3963         if (status)
3964                 goto err;
3965
3966         if (netif_running(adapter->netdev)) {
3967                 status = be_open(adapter->netdev);
3968                 if (status)
3969                         goto err;
3970         }
3971
3972         dev_info(&adapter->pdev->dev,
3973                  "Adapter SLIPORT recovery succeeded\n");
3974         return 0;
3975 err:
3976         if (adapter->eeh_error)
3977                 dev_err(&adapter->pdev->dev,
3978                         "Adapter SLIPORT recovery failed\n");
3979
3980         return status;
3981 }
3982
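/* Runs every second. On Lancer, if a HW error has been detected (EEH
 * errors are left to the PCI error handlers), detach the netdev and
 * try to re-initialize the function via lancer_recover_func().
 */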
3983 static void be_func_recovery_task(struct work_struct *work)
3984 {
3985         struct be_adapter *adapter =
3986                 container_of(work, struct be_adapter, func_recovery_work.work);
3987         int status;
3988
3989         be_detect_error(adapter);
3990
3991         if (adapter->hw_error && lancer_chip(adapter)) {
3993                 if (adapter->eeh_error)
3994                         goto out;
3995
3996                 rtnl_lock();
3997                 netif_device_detach(adapter->netdev);
3998                 rtnl_unlock();
3999
4000                 status = lancer_recover_func(adapter);
4001
4002                 if (!status)
4003                         netif_device_attach(adapter->netdev);
4004         }
4005
4006 out:
4007         schedule_delayed_work(&adapter->func_recovery_work,
4008                               msecs_to_jiffies(1000));
4009 }
4010
4011 static void be_worker(struct work_struct *work)
4012 {
4013         struct be_adapter *adapter =
4014                 container_of(work, struct be_adapter, work.work);
4015         struct be_rx_obj *rxo;
4016         struct be_eq_obj *eqo;
4017         int i;
4018
4019         /* when interrupts are not yet enabled, just reap any pending
4020          * mcc completions */
4021         if (!netif_running(adapter->netdev)) {
4022                 local_bh_disable();
4023                 be_process_mcc(adapter);
4024                 local_bh_enable();
4025                 goto reschedule;
4026         }
4027
4028         if (!adapter->stats_cmd_sent) {
4029                 if (lancer_chip(adapter))
4030                         lancer_cmd_get_pport_stats(adapter,
4031                                                 &adapter->stats_cmd);
4032                 else
4033                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4034         }
4035
4036         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4037                 be_cmd_get_die_temperature(adapter);
4038
4039         for_all_rx_queues(adapter, rxo, i) {
4040                 if (rxo->rx_post_starved) {
4041                         rxo->rx_post_starved = false;
4042                         be_post_rx_frags(rxo, GFP_KERNEL);
4043                 }
4044         }
4045
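        /* Re-tune the EQ interrupt delay based on the observed load */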
4046         for_all_evt_queues(adapter, eqo, i)
4047                 be_eqd_update(adapter, eqo);
4048
4049 reschedule:
4050         adapter->work_counter++;
4051         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4052 }
4053
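/* Skip the function reset if VFs are already enabled, presumably left
 * over from a previous PF driver instance; resetting would destroy
 * their state.
 */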
4054 static bool be_reset_required(struct be_adapter *adapter)
4055 {
4056         return be_find_vfs(adapter, ENABLED) <= 0;
4057 }
4058
4059 static char *mc_name(struct be_adapter *adapter)
4060 {
4061         if (adapter->function_mode & FLEX10_MODE)
4062                 return "FLEX10";
4063         else if (adapter->function_mode & VNIC_MODE)
4064                 return "vNIC";
4065         else if (adapter->function_mode & UMC_ENABLED)
4066                 return "UMC";
4067         else
4068                 return "";
4069 }
4070
4071 static inline char *func_name(struct be_adapter *adapter)
4072 {
4073         return be_physfn(adapter) ? "PF" : "VF";
4074 }
4075
4076 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4077 {
4078         int status = 0;
4079         struct be_adapter *adapter;
4080         struct net_device *netdev;
4081         char port_name;
4082
4083         status = pci_enable_device(pdev);
4084         if (status)
4085                 goto do_none;
4086
4087         status = pci_request_regions(pdev, DRV_NAME);
4088         if (status)
4089                 goto disable_dev;
4090         pci_set_master(pdev);
4091
4092         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4093         if (netdev == NULL) {
4094                 status = -ENOMEM;
4095                 goto rel_reg;
4096         }
4097         adapter = netdev_priv(netdev);
4098         adapter->pdev = pdev;
4099         pci_set_drvdata(pdev, adapter);
4100         adapter->netdev = netdev;
4101         SET_NETDEV_DEV(netdev, &pdev->dev);
4102
4103         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4104         if (!status) {
4105                 netdev->features |= NETIF_F_HIGHDMA;
4106         } else {
4107                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4108                 if (status) {
4109                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4110                         goto free_netdev;
4111                 }
4112         }
4113
4114         status = pci_enable_pcie_error_reporting(pdev);
4115         if (status)
4116                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4117
4118         status = be_ctrl_init(adapter);
4119         if (status)
4120                 goto free_netdev;
4121
4122         /* sync up with fw's ready state */
4123         if (be_physfn(adapter)) {
4124                 status = be_fw_wait_ready(adapter);
4125                 if (status)
4126                         goto ctrl_clean;
4127         }
4128
4129         /* tell fw we're ready to fire cmds */
4130         status = be_cmd_fw_init(adapter);
4131         if (status)
4132                 goto ctrl_clean;
4133
4134         if (be_reset_required(adapter)) {
4135                 status = be_cmd_reset_function(adapter);
4136                 if (status)
4137                         goto ctrl_clean;
4138         }
4139
4140         /* The INTR bit may be set in the card when probed by a kdump kernel
4141          * after a crash.
4142          */
4143         if (!lancer_chip(adapter))
4144                 be_intr_set(adapter, false);
4145
4146         status = be_stats_init(adapter);
4147         if (status)
4148                 goto ctrl_clean;
4149
4150         status = be_get_initial_config(adapter);
4151         if (status)
4152                 goto stats_clean;
4153
4154         INIT_DELAYED_WORK(&adapter->work, be_worker);
4155         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4156         adapter->rx_fc = adapter->tx_fc = true;
4157
4158         status = be_setup(adapter);
4159         if (status)
4160                 goto stats_clean;
4161
4162         be_netdev_init(netdev);
4163         status = register_netdev(netdev);
4164         if (status != 0)
4165                 goto unsetup;
4166
4167         be_roce_dev_add(adapter);
4168
4169         schedule_delayed_work(&adapter->func_recovery_work,
4170                               msecs_to_jiffies(1000));
4171
4172         be_cmd_query_port_name(adapter, &port_name);
4173
4174         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4175                  func_name(adapter), mc_name(adapter), port_name);
4176
4177         return 0;
4178
4179 unsetup:
4180         be_clear(adapter);
4181 stats_clean:
4182         be_stats_cleanup(adapter);
4183 ctrl_clean:
4184         be_ctrl_cleanup(adapter);
4185 free_netdev:
4186         free_netdev(netdev);
4187         pci_set_drvdata(pdev, NULL);
4188 rel_reg:
4189         pci_release_regions(pdev);
4190 disable_dev:
4191         pci_disable_device(pdev);
4192 do_none:
4193         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4194         return status;
4195 }
4196
4197 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4198 {
4199         struct be_adapter *adapter = pci_get_drvdata(pdev);
4200         struct net_device *netdev = adapter->netdev;
4201
4202         if (adapter->wol)
4203                 be_setup_wol(adapter, true);
4204
4205         cancel_delayed_work_sync(&adapter->func_recovery_work);
4206
4207         netif_device_detach(netdev);
4208         if (netif_running(netdev)) {
4209                 rtnl_lock();
4210                 be_close(netdev);
4211                 rtnl_unlock();
4212         }
4213         be_clear(adapter);
4214
4215         pci_save_state(pdev);
4216         pci_disable_device(pdev);
4217         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4218         return 0;
4219 }
4220
4221 static int be_resume(struct pci_dev *pdev)
4222 {
4223         int status = 0;
4224         struct be_adapter *adapter = pci_get_drvdata(pdev);
4225         struct net_device *netdev = adapter->netdev;
4226
4227         netif_device_detach(netdev);
4228
4229         status = pci_enable_device(pdev);
4230         if (status)
4231                 return status;
4232
4233         pci_set_power_state(pdev, PCI_D0);
4234         pci_restore_state(pdev);
4235
4236         /* tell fw we're ready to fire cmds */
4237         status = be_cmd_fw_init(adapter);
4238         if (status)
4239                 return status;
4240
4241         be_setup(adapter);
4242         if (netif_running(netdev)) {
4243                 rtnl_lock();
4244                 be_open(netdev);
4245                 rtnl_unlock();
4246         }
4247
4248         schedule_delayed_work(&adapter->func_recovery_work,
4249                               msecs_to_jiffies(1000));
4250         netif_device_attach(netdev);
4251
4252         if (adapter->wol)
4253                 be_setup_wol(adapter, false);
4254
4255         return 0;
4256 }
4257
4258 /*
4259  * An FLR will stop BE from DMAing any data.
4260  */
4261 static void be_shutdown(struct pci_dev *pdev)
4262 {
4263         struct be_adapter *adapter = pci_get_drvdata(pdev);
4264
4265         if (!adapter)
4266                 return;
4267
4268         cancel_delayed_work_sync(&adapter->work);
4269         cancel_delayed_work_sync(&adapter->func_recovery_work);
4270
4271         netif_device_detach(adapter->netdev);
4272
4273         be_cmd_reset_function(adapter);
4274
4275         pci_disable_device(pdev);
4276 }
4277
4278 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4279                                 pci_channel_state_t state)
4280 {
4281         struct be_adapter *adapter = pci_get_drvdata(pdev);
4282         struct net_device *netdev = adapter->netdev;
4283
4284         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4285
4286         adapter->eeh_error = true;
4287
4288         cancel_delayed_work_sync(&adapter->func_recovery_work);
4289
4290         rtnl_lock();
4291         netif_device_detach(netdev);
4292         rtnl_unlock();
4293
4294         if (netif_running(netdev)) {
4295                 rtnl_lock();
4296                 be_close(netdev);
4297                 rtnl_unlock();
4298         }
4299         be_clear(adapter);
4300
4301         if (state == pci_channel_io_perm_failure)
4302                 return PCI_ERS_RESULT_DISCONNECT;
4303
4304         pci_disable_device(pdev);
4305
4306         /* The error could cause the FW to trigger a flash debug dump.
4307          * Resetting the card while flash dump is in progress
4308          * can cause it not to recover; wait for it to finish.
4309          * Wait only for first function as it is needed only once per
4310          * adapter.
4311          */
4312         if (pdev->devfn == 0)
4313                 ssleep(30);
4314
4315         return PCI_ERS_RESULT_NEED_RESET;
4316 }
4317
4318 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4319 {
4320         struct be_adapter *adapter = pci_get_drvdata(pdev);
4321         int status;
4322
4323         dev_info(&adapter->pdev->dev, "EEH reset\n");
4324         be_clear_all_error(adapter);
4325
4326         status = pci_enable_device(pdev);
4327         if (status)
4328                 return PCI_ERS_RESULT_DISCONNECT;
4329
4330         pci_set_master(pdev);
4331         pci_set_power_state(pdev, PCI_D0);
4332         pci_restore_state(pdev);
4333
4334         /* Check if card is ok and fw is ready */
4335         dev_info(&adapter->pdev->dev,
4336                  "Waiting for FW to be ready after EEH reset\n");
4337         status = be_fw_wait_ready(adapter);
4338         if (status)
4339                 return PCI_ERS_RESULT_DISCONNECT;
4340
4341         pci_cleanup_aer_uncorrect_error_status(pdev);
4342         return PCI_ERS_RESULT_RECOVERED;
4343 }
4344
4345 static void be_eeh_resume(struct pci_dev *pdev)
4346 {
4347         int status = 0;
4348         struct be_adapter *adapter = pci_get_drvdata(pdev);
4349         struct net_device *netdev = adapter->netdev;
4350
4351         dev_info(&adapter->pdev->dev, "EEH resume\n");
4352
4353         pci_save_state(pdev);
4354
4355         /* tell fw we're ready to fire cmds */
4356         status = be_cmd_fw_init(adapter);
4357         if (status)
4358                 goto err;
4359
4360         status = be_cmd_reset_function(adapter);
4361         if (status)
4362                 goto err;
4363
4364         status = be_setup(adapter);
4365         if (status)
4366                 goto err;
4367
4368         if (netif_running(netdev)) {
4369                 status = be_open(netdev);
4370                 if (status)
4371                         goto err;
4372         }
4373
4374         schedule_delayed_work(&adapter->func_recovery_work,
4375                               msecs_to_jiffies(1000));
4376         netif_device_attach(netdev);
4377         return;
4378 err:
4379         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4380 }
4381
4382 static const struct pci_error_handlers be_eeh_handlers = {
4383         .error_detected = be_eeh_err_detected,
4384         .slot_reset = be_eeh_reset,
4385         .resume = be_eeh_resume,
4386 };
4387
4388 static struct pci_driver be_driver = {
4389         .name = DRV_NAME,
4390         .id_table = be_dev_ids,
4391         .probe = be_probe,
4392         .remove = be_remove,
4393         .suspend = be_suspend,
4394         .resume = be_resume,
4395         .shutdown = be_shutdown,
4396         .err_handler = &be_eeh_handlers
4397 };
4398
4399 static int __init be_init_module(void)
4400 {
4401         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4402             rx_frag_size != 2048) {
4403                 printk(KERN_WARNING DRV_NAME
4404                        ": Module param rx_frag_size must be 2048/4096/8192."
4405                        " Using 2048\n");
4406                 rx_frag_size = 2048;
4407         }
4408
4409         return pci_register_driver(&be_driver);
4410 }
4411 module_init(be_init_module);
4412
4413 static void __exit be_exit_module(void)
4414 {
4415         pci_unregister_driver(&be_driver);
4416 }
4417 module_exit(be_exit_module);