]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/emulex/benet/be_main.c
ASoC: Blackfin: ADAU1X81 eval board support
[karo-tx-linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2014 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26 #include <net/vxlan.h>
27
28 MODULE_VERSION(DRV_VER);
29 MODULE_DEVICE_TABLE(pci, be_dev_ids);
30 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
31 MODULE_AUTHOR("Emulex Corporation");
32 MODULE_LICENSE("GPL");
33
34 static unsigned int num_vfs;
35 module_param(num_vfs, uint, S_IRUGO);
36 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
37
38 static ushort rx_frag_size = 2048;
39 module_param(rx_frag_size, ushort, S_IRUGO);
40 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
42 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
43         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
44         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
45         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
48         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
49         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
50         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
51         { 0 }
52 };
53 MODULE_DEVICE_TABLE(pci, be_dev_ids);
54 /* UE Status Low CSR */
55 static const char * const ue_status_low_desc[] = {
56         "CEV",
57         "CTX",
58         "DBUF",
59         "ERX",
60         "Host",
61         "MPU",
62         "NDMA",
63         "PTC ",
64         "RDMA ",
65         "RXF ",
66         "RXIPS ",
67         "RXULP0 ",
68         "RXULP1 ",
69         "RXULP2 ",
70         "TIM ",
71         "TPOST ",
72         "TPRE ",
73         "TXIPS ",
74         "TXULP0 ",
75         "TXULP1 ",
76         "UC ",
77         "WDMA ",
78         "TXULP2 ",
79         "HOST1 ",
80         "P0_OB_LINK ",
81         "P1_OB_LINK ",
82         "HOST_GPIO ",
83         "MBOX ",
84         "AXGMAC0",
85         "AXGMAC1",
86         "JTAG",
87         "MPU_INTPEND"
88 };
89 /* UE Status High CSR */
90 static const char * const ue_status_hi_desc[] = {
91         "LPCMEMHOST",
92         "MGMT_MAC",
93         "PCS0ONLINE",
94         "MPU_IRAM",
95         "PCS1ONLINE",
96         "PCTL0",
97         "PCTL1",
98         "PMEM",
99         "RR",
100         "TXPB",
101         "RXPP",
102         "XAUI",
103         "TXP",
104         "ARM",
105         "IPC",
106         "HOST2",
107         "HOST3",
108         "HOST4",
109         "HOST5",
110         "HOST6",
111         "HOST7",
112         "HOST8",
113         "HOST9",
114         "NETC",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown",
121         "Unknown",
122         "Unknown"
123 };
124
125
126 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127 {
128         struct be_dma_mem *mem = &q->dma_mem;
129         if (mem->va) {
130                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131                                   mem->dma);
132                 mem->va = NULL;
133         }
134 }
135
136 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137                 u16 len, u16 entry_size)
138 {
139         struct be_dma_mem *mem = &q->dma_mem;
140
141         memset(q, 0, sizeof(*q));
142         q->len = len;
143         q->entry_size = entry_size;
144         mem->size = len * entry_size;
145         mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146                                       GFP_KERNEL);
147         if (!mem->va)
148                 return -ENOMEM;
149         return 0;
150 }
151
152 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
153 {
154         u32 reg, enabled;
155
156         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157                                 &reg);
158         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
160         if (!enabled && enable)
161                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else if (enabled && !enable)
163                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164         else
165                 return;
166
167         pci_write_config_dword(adapter->pdev,
168                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169 }
170
171 static void be_intr_set(struct be_adapter *adapter, bool enable)
172 {
173         int status = 0;
174
175         /* On lancer interrupts can't be controlled via this register */
176         if (lancer_chip(adapter))
177                 return;
178
179         if (adapter->eeh_error)
180                 return;
181
182         status = be_cmd_intr_set(adapter, enable);
183         if (status)
184                 be_reg_intr_set(adapter, enable);
185 }
186
187 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
188 {
189         u32 val = 0;
190         val |= qid & DB_RQ_RING_ID_MASK;
191         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
192
193         wmb();
194         iowrite32(val, adapter->db + DB_RQ_OFFSET);
195 }
196
197 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198                           u16 posted)
199 {
200         u32 val = 0;
201         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
202         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
203
204         wmb();
205         iowrite32(val, adapter->db + txo->db_offset);
206 }
207
208 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
209                 bool arm, bool clear_int, u16 num_popped)
210 {
211         u32 val = 0;
212         val |= qid & DB_EQ_RING_ID_MASK;
213         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
214                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
215
216         if (adapter->eeh_error)
217                 return;
218
219         if (arm)
220                 val |= 1 << DB_EQ_REARM_SHIFT;
221         if (clear_int)
222                 val |= 1 << DB_EQ_CLR_SHIFT;
223         val |= 1 << DB_EQ_EVNT_SHIFT;
224         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
225         iowrite32(val, adapter->db + DB_EQ_OFFSET);
226 }
227
228 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
229 {
230         u32 val = 0;
231         val |= qid & DB_CQ_RING_ID_MASK;
232         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
233                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
234
235         if (adapter->eeh_error)
236                 return;
237
238         if (arm)
239                 val |= 1 << DB_CQ_REARM_SHIFT;
240         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
241         iowrite32(val, adapter->db + DB_CQ_OFFSET);
242 }
243
244 static int be_mac_addr_set(struct net_device *netdev, void *p)
245 {
246         struct be_adapter *adapter = netdev_priv(netdev);
247         struct device *dev = &adapter->pdev->dev;
248         struct sockaddr *addr = p;
249         int status;
250         u8 mac[ETH_ALEN];
251         u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
252
253         if (!is_valid_ether_addr(addr->sa_data))
254                 return -EADDRNOTAVAIL;
255
256         /* Proceed further only if, User provided MAC is different
257          * from active MAC
258          */
259         if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
260                 return 0;
261
262         /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
263          * privilege or if PF did not provision the new MAC address.
264          * On BE3, this cmd will always fail if the VF doesn't have the
265          * FILTMGMT privilege. This failure is OK, only if the PF programmed
266          * the MAC for the VF.
267          */
268         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
269                                  adapter->if_handle, &adapter->pmac_id[0], 0);
270         if (!status) {
271                 curr_pmac_id = adapter->pmac_id[0];
272
273                 /* Delete the old programmed MAC. This call may fail if the
274                  * old MAC was already deleted by the PF driver.
275                  */
276                 if (adapter->pmac_id[0] != old_pmac_id)
277                         be_cmd_pmac_del(adapter, adapter->if_handle,
278                                         old_pmac_id, 0);
279         }
280
281         /* Decide if the new MAC is successfully activated only after
282          * querying the FW
283          */
284         status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
285                                        adapter->if_handle, true, 0);
286         if (status)
287                 goto err;
288
289         /* The MAC change did not happen, either due to lack of privilege
290          * or PF didn't pre-provision.
291          */
292         if (!ether_addr_equal(addr->sa_data, mac)) {
293                 status = -EPERM;
294                 goto err;
295         }
296
297         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
298         dev_info(dev, "MAC address changed to %pM\n", mac);
299         return 0;
300 err:
301         dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
302         return status;
303 }
304
305 /* BE2 supports only v0 cmd */
306 static void *hw_stats_from_cmd(struct be_adapter *adapter)
307 {
308         if (BE2_chip(adapter)) {
309                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
310
311                 return &cmd->hw_stats;
312         } else if (BE3_chip(adapter)) {
313                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
314
315                 return &cmd->hw_stats;
316         } else {
317                 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
318
319                 return &cmd->hw_stats;
320         }
321 }
322
323 /* BE2 supports only v0 cmd */
324 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
325 {
326         if (BE2_chip(adapter)) {
327                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
328
329                 return &hw_stats->erx;
330         } else if (BE3_chip(adapter)) {
331                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
332
333                 return &hw_stats->erx;
334         } else {
335                 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
336
337                 return &hw_stats->erx;
338         }
339 }
340
341 static void populate_be_v0_stats(struct be_adapter *adapter)
342 {
343         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
344         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
345         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
346         struct be_port_rxf_stats_v0 *port_stats =
347                                         &rxf_stats->port[adapter->port_num];
348         struct be_drv_stats *drvs = &adapter->drv_stats;
349
350         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
351         drvs->rx_pause_frames = port_stats->rx_pause_frames;
352         drvs->rx_crc_errors = port_stats->rx_crc_errors;
353         drvs->rx_control_frames = port_stats->rx_control_frames;
354         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
355         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
356         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
357         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
358         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
359         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
360         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
361         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
362         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
363         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
364         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
365         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
366         drvs->rx_dropped_header_too_small =
367                 port_stats->rx_dropped_header_too_small;
368         drvs->rx_address_filtered =
369                                         port_stats->rx_address_filtered +
370                                         port_stats->rx_vlan_filtered;
371         drvs->rx_alignment_symbol_errors =
372                 port_stats->rx_alignment_symbol_errors;
373
374         drvs->tx_pauseframes = port_stats->tx_pauseframes;
375         drvs->tx_controlframes = port_stats->tx_controlframes;
376
377         if (adapter->port_num)
378                 drvs->jabber_events = rxf_stats->port1_jabber_events;
379         else
380                 drvs->jabber_events = rxf_stats->port0_jabber_events;
381         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
382         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
383         drvs->forwarded_packets = rxf_stats->forwarded_packets;
384         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
385         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
386         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
387         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
388 }
389
390 static void populate_be_v1_stats(struct be_adapter *adapter)
391 {
392         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
393         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
394         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
395         struct be_port_rxf_stats_v1 *port_stats =
396                                         &rxf_stats->port[adapter->port_num];
397         struct be_drv_stats *drvs = &adapter->drv_stats;
398
399         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
400         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
401         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
402         drvs->rx_pause_frames = port_stats->rx_pause_frames;
403         drvs->rx_crc_errors = port_stats->rx_crc_errors;
404         drvs->rx_control_frames = port_stats->rx_control_frames;
405         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
406         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
407         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
408         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
409         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
410         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
411         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
412         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
413         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
414         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
415         drvs->rx_dropped_header_too_small =
416                 port_stats->rx_dropped_header_too_small;
417         drvs->rx_input_fifo_overflow_drop =
418                 port_stats->rx_input_fifo_overflow_drop;
419         drvs->rx_address_filtered = port_stats->rx_address_filtered;
420         drvs->rx_alignment_symbol_errors =
421                 port_stats->rx_alignment_symbol_errors;
422         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
423         drvs->tx_pauseframes = port_stats->tx_pauseframes;
424         drvs->tx_controlframes = port_stats->tx_controlframes;
425         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
426         drvs->jabber_events = port_stats->jabber_events;
427         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
428         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
429         drvs->forwarded_packets = rxf_stats->forwarded_packets;
430         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
431         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
432         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
433         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
434 }
435
436 static void populate_be_v2_stats(struct be_adapter *adapter)
437 {
438         struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
439         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
440         struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
441         struct be_port_rxf_stats_v2 *port_stats =
442                                         &rxf_stats->port[adapter->port_num];
443         struct be_drv_stats *drvs = &adapter->drv_stats;
444
445         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
446         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
447         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
448         drvs->rx_pause_frames = port_stats->rx_pause_frames;
449         drvs->rx_crc_errors = port_stats->rx_crc_errors;
450         drvs->rx_control_frames = port_stats->rx_control_frames;
451         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
452         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
453         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
454         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
455         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
456         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
457         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
458         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
459         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
460         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
461         drvs->rx_dropped_header_too_small =
462                 port_stats->rx_dropped_header_too_small;
463         drvs->rx_input_fifo_overflow_drop =
464                 port_stats->rx_input_fifo_overflow_drop;
465         drvs->rx_address_filtered = port_stats->rx_address_filtered;
466         drvs->rx_alignment_symbol_errors =
467                 port_stats->rx_alignment_symbol_errors;
468         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
469         drvs->tx_pauseframes = port_stats->tx_pauseframes;
470         drvs->tx_controlframes = port_stats->tx_controlframes;
471         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
472         drvs->jabber_events = port_stats->jabber_events;
473         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
474         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
475         drvs->forwarded_packets = rxf_stats->forwarded_packets;
476         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
477         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
478         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
479         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
480         if (be_roce_supported(adapter))  {
481                 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
482                 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
483                 drvs->rx_roce_frames = port_stats->roce_frames_received;
484                 drvs->roce_drops_crc = port_stats->roce_drops_crc;
485                 drvs->roce_drops_payload_len =
486                         port_stats->roce_drops_payload_len;
487         }
488 }
489
490 static void populate_lancer_stats(struct be_adapter *adapter)
491 {
492
493         struct be_drv_stats *drvs = &adapter->drv_stats;
494         struct lancer_pport_stats *pport_stats =
495                                         pport_stats_from_cmd(adapter);
496
497         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
498         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
499         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
500         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
501         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
502         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
503         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
504         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
505         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
506         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
507         drvs->rx_dropped_tcp_length =
508                                 pport_stats->rx_dropped_invalid_tcp_length;
509         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
510         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
511         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
512         drvs->rx_dropped_header_too_small =
513                                 pport_stats->rx_dropped_header_too_small;
514         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
515         drvs->rx_address_filtered =
516                                         pport_stats->rx_address_filtered +
517                                         pport_stats->rx_vlan_filtered;
518         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
519         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
520         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
521         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
522         drvs->jabber_events = pport_stats->rx_jabbers;
523         drvs->forwarded_packets = pport_stats->num_forwards_lo;
524         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
525         drvs->rx_drops_too_many_frags =
526                                 pport_stats->rx_drops_too_many_frags_lo;
527 }
528
529 static void accumulate_16bit_val(u32 *acc, u16 val)
530 {
531 #define lo(x)                   (x & 0xFFFF)
532 #define hi(x)                   (x & 0xFFFF0000)
533         bool wrapped = val < lo(*acc);
534         u32 newacc = hi(*acc) + val;
535
536         if (wrapped)
537                 newacc += 65536;
538         ACCESS_ONCE(*acc) = newacc;
539 }
540
541 static void populate_erx_stats(struct be_adapter *adapter,
542                         struct be_rx_obj *rxo,
543                         u32 erx_stat)
544 {
545         if (!BEx_chip(adapter))
546                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
547         else
548                 /* below erx HW counter can actually wrap around after
549                  * 65535. Driver accumulates a 32-bit value
550                  */
551                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
552                                      (u16)erx_stat);
553 }
554
555 void be_parse_stats(struct be_adapter *adapter)
556 {
557         struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
558         struct be_rx_obj *rxo;
559         int i;
560         u32 erx_stat;
561
562         if (lancer_chip(adapter)) {
563                 populate_lancer_stats(adapter);
564         } else {
565                 if (BE2_chip(adapter))
566                         populate_be_v0_stats(adapter);
567                 else if (BE3_chip(adapter))
568                         /* for BE3 */
569                         populate_be_v1_stats(adapter);
570                 else
571                         populate_be_v2_stats(adapter);
572
573                 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
574                 for_all_rx_queues(adapter, rxo, i) {
575                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
576                         populate_erx_stats(adapter, rxo, erx_stat);
577                 }
578         }
579 }
580
581 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
582                                         struct rtnl_link_stats64 *stats)
583 {
584         struct be_adapter *adapter = netdev_priv(netdev);
585         struct be_drv_stats *drvs = &adapter->drv_stats;
586         struct be_rx_obj *rxo;
587         struct be_tx_obj *txo;
588         u64 pkts, bytes;
589         unsigned int start;
590         int i;
591
592         for_all_rx_queues(adapter, rxo, i) {
593                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
594                 do {
595                         start = u64_stats_fetch_begin_irq(&rx_stats->sync);
596                         pkts = rx_stats(rxo)->rx_pkts;
597                         bytes = rx_stats(rxo)->rx_bytes;
598                 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
599                 stats->rx_packets += pkts;
600                 stats->rx_bytes += bytes;
601                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
602                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
603                                         rx_stats(rxo)->rx_drops_no_frags;
604         }
605
606         for_all_tx_queues(adapter, txo, i) {
607                 const struct be_tx_stats *tx_stats = tx_stats(txo);
608                 do {
609                         start = u64_stats_fetch_begin_irq(&tx_stats->sync);
610                         pkts = tx_stats(txo)->tx_pkts;
611                         bytes = tx_stats(txo)->tx_bytes;
612                 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
613                 stats->tx_packets += pkts;
614                 stats->tx_bytes += bytes;
615         }
616
617         /* bad pkts received */
618         stats->rx_errors = drvs->rx_crc_errors +
619                 drvs->rx_alignment_symbol_errors +
620                 drvs->rx_in_range_errors +
621                 drvs->rx_out_range_errors +
622                 drvs->rx_frame_too_long +
623                 drvs->rx_dropped_too_small +
624                 drvs->rx_dropped_too_short +
625                 drvs->rx_dropped_header_too_small +
626                 drvs->rx_dropped_tcp_length +
627                 drvs->rx_dropped_runt;
628
629         /* detailed rx errors */
630         stats->rx_length_errors = drvs->rx_in_range_errors +
631                 drvs->rx_out_range_errors +
632                 drvs->rx_frame_too_long;
633
634         stats->rx_crc_errors = drvs->rx_crc_errors;
635
636         /* frame alignment errors */
637         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
638
639         /* receiver fifo overrun */
640         /* drops_no_pbuf is no per i/f, it's per BE card */
641         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
642                                 drvs->rx_input_fifo_overflow_drop +
643                                 drvs->rx_drops_no_pbuf;
644         return stats;
645 }
646
647 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
648 {
649         struct net_device *netdev = adapter->netdev;
650
651         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
652                 netif_carrier_off(netdev);
653                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
654         }
655
656         if (link_status)
657                 netif_carrier_on(netdev);
658         else
659                 netif_carrier_off(netdev);
660 }
661
662 static void be_tx_stats_update(struct be_tx_obj *txo,
663                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
664 {
665         struct be_tx_stats *stats = tx_stats(txo);
666
667         u64_stats_update_begin(&stats->sync);
668         stats->tx_reqs++;
669         stats->tx_wrbs += wrb_cnt;
670         stats->tx_bytes += copied;
671         stats->tx_pkts += (gso_segs ? gso_segs : 1);
672         if (stopped)
673                 stats->tx_stops++;
674         u64_stats_update_end(&stats->sync);
675 }
676
677 /* Determine number of WRB entries needed to xmit data in an skb */
678 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
679                                                                 bool *dummy)
680 {
681         int cnt = (skb->len > skb->data_len);
682
683         cnt += skb_shinfo(skb)->nr_frags;
684
685         /* to account for hdr wrb */
686         cnt++;
687         if (lancer_chip(adapter) || !(cnt & 1)) {
688                 *dummy = false;
689         } else {
690                 /* add a dummy to make it an even num */
691                 cnt++;
692                 *dummy = true;
693         }
694         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
695         return cnt;
696 }
697
698 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
699 {
700         wrb->frag_pa_hi = upper_32_bits(addr);
701         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
702         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
703         wrb->rsvd0 = 0;
704 }
705
706 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
707                                         struct sk_buff *skb)
708 {
709         u8 vlan_prio;
710         u16 vlan_tag;
711
712         vlan_tag = vlan_tx_tag_get(skb);
713         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
714         /* If vlan priority provided by OS is NOT in available bmap */
715         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
716                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
717                                 adapter->recommended_prio;
718
719         return vlan_tag;
720 }
721
722 /* Used only for IP tunnel packets */
723 static u16 skb_inner_ip_proto(struct sk_buff *skb)
724 {
725         return (inner_ip_hdr(skb)->version == 4) ?
726                 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
727 }
728
729 static u16 skb_ip_proto(struct sk_buff *skb)
730 {
731         return (ip_hdr(skb)->version == 4) ?
732                 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
733 }
734
735 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
736                 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
737 {
738         u16 vlan_tag, proto;
739
740         memset(hdr, 0, sizeof(*hdr));
741
742         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
743
744         if (skb_is_gso(skb)) {
745                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
746                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
747                         hdr, skb_shinfo(skb)->gso_size);
748                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
749                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
750         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
751                 if (skb->encapsulation) {
752                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
753                         proto = skb_inner_ip_proto(skb);
754                 } else {
755                         proto = skb_ip_proto(skb);
756                 }
757                 if (proto == IPPROTO_TCP)
758                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
759                 else if (proto == IPPROTO_UDP)
760                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
761         }
762
763         if (vlan_tx_tag_present(skb)) {
764                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
765                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
766                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
767         }
768
769         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
770         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
771         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
772         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
773         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
774 }
775
776 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
777                 bool unmap_single)
778 {
779         dma_addr_t dma;
780
781         be_dws_le_to_cpu(wrb, sizeof(*wrb));
782
783         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
784         if (wrb->frag_len) {
785                 if (unmap_single)
786                         dma_unmap_single(dev, dma, wrb->frag_len,
787                                          DMA_TO_DEVICE);
788                 else
789                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
790         }
791 }
792
793 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
794                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
795                 bool skip_hw_vlan)
796 {
797         dma_addr_t busaddr;
798         int i, copied = 0;
799         struct device *dev = &adapter->pdev->dev;
800         struct sk_buff *first_skb = skb;
801         struct be_eth_wrb *wrb;
802         struct be_eth_hdr_wrb *hdr;
803         bool map_single = false;
804         u16 map_head;
805
806         hdr = queue_head_node(txq);
807         queue_head_inc(txq);
808         map_head = txq->head;
809
810         if (skb->len > skb->data_len) {
811                 int len = skb_headlen(skb);
812                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
813                 if (dma_mapping_error(dev, busaddr))
814                         goto dma_err;
815                 map_single = true;
816                 wrb = queue_head_node(txq);
817                 wrb_fill(wrb, busaddr, len);
818                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
819                 queue_head_inc(txq);
820                 copied += len;
821         }
822
823         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
824                 const struct skb_frag_struct *frag =
825                         &skb_shinfo(skb)->frags[i];
826                 busaddr = skb_frag_dma_map(dev, frag, 0,
827                                            skb_frag_size(frag), DMA_TO_DEVICE);
828                 if (dma_mapping_error(dev, busaddr))
829                         goto dma_err;
830                 wrb = queue_head_node(txq);
831                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
832                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
833                 queue_head_inc(txq);
834                 copied += skb_frag_size(frag);
835         }
836
837         if (dummy_wrb) {
838                 wrb = queue_head_node(txq);
839                 wrb_fill(wrb, 0, 0);
840                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
841                 queue_head_inc(txq);
842         }
843
844         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
845         be_dws_cpu_to_le(hdr, sizeof(*hdr));
846
847         return copied;
848 dma_err:
849         txq->head = map_head;
850         while (copied) {
851                 wrb = queue_head_node(txq);
852                 unmap_tx_frag(dev, wrb, map_single);
853                 map_single = false;
854                 copied -= wrb->frag_len;
855                 queue_head_inc(txq);
856         }
857         return 0;
858 }
859
860 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
861                                              struct sk_buff *skb,
862                                              bool *skip_hw_vlan)
863 {
864         u16 vlan_tag = 0;
865
866         skb = skb_share_check(skb, GFP_ATOMIC);
867         if (unlikely(!skb))
868                 return skb;
869
870         if (vlan_tx_tag_present(skb))
871                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
872
873         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
874                 if (!vlan_tag)
875                         vlan_tag = adapter->pvid;
876                 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
877                  * skip VLAN insertion
878                  */
879                 if (skip_hw_vlan)
880                         *skip_hw_vlan = true;
881         }
882
883         if (vlan_tag) {
884                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
885                 if (unlikely(!skb))
886                         return skb;
887                 skb->vlan_tci = 0;
888         }
889
890         /* Insert the outer VLAN, if any */
891         if (adapter->qnq_vid) {
892                 vlan_tag = adapter->qnq_vid;
893                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
894                 if (unlikely(!skb))
895                         return skb;
896                 if (skip_hw_vlan)
897                         *skip_hw_vlan = true;
898         }
899
900         return skb;
901 }
902
903 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
904 {
905         struct ethhdr *eh = (struct ethhdr *)skb->data;
906         u16 offset = ETH_HLEN;
907
908         if (eh->h_proto == htons(ETH_P_IPV6)) {
909                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
910
911                 offset += sizeof(struct ipv6hdr);
912                 if (ip6h->nexthdr != NEXTHDR_TCP &&
913                     ip6h->nexthdr != NEXTHDR_UDP) {
914                         struct ipv6_opt_hdr *ehdr =
915                                 (struct ipv6_opt_hdr *) (skb->data + offset);
916
917                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
918                         if (ehdr->hdrlen == 0xff)
919                                 return true;
920                 }
921         }
922         return false;
923 }
924
925 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
926 {
927         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
928 }
929
930 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
931                                 struct sk_buff *skb)
932 {
933         return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
934 }
935
936 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
937                                                   struct sk_buff *skb,
938                                                   bool *skip_hw_vlan)
939 {
940         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
941         unsigned int eth_hdr_len;
942         struct iphdr *ip;
943
944         /* For padded packets, BE HW modifies tot_len field in IP header
945          * incorrecly when VLAN tag is inserted by HW.
946          * For padded packets, Lancer computes incorrect checksum.
947          */
948         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
949                                                 VLAN_ETH_HLEN : ETH_HLEN;
950         if (skb->len <= 60 &&
951             (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
952             is_ipv4_pkt(skb)) {
953                 ip = (struct iphdr *)ip_hdr(skb);
954                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
955         }
956
957         /* If vlan tag is already inlined in the packet, skip HW VLAN
958          * tagging in pvid-tagging mode
959          */
960         if (be_pvid_tagging_enabled(adapter) &&
961             veh->h_vlan_proto == htons(ETH_P_8021Q))
962                         *skip_hw_vlan = true;
963
964         /* HW has a bug wherein it will calculate CSUM for VLAN
965          * pkts even though it is disabled.
966          * Manually insert VLAN in pkt.
967          */
968         if (skb->ip_summed != CHECKSUM_PARTIAL &&
969             vlan_tx_tag_present(skb)) {
970                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
971                 if (unlikely(!skb))
972                         goto err;
973         }
974
975         /* HW may lockup when VLAN HW tagging is requested on
976          * certain ipv6 packets. Drop such pkts if the HW workaround to
977          * skip HW tagging is not enabled by FW.
978          */
979         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
980             (adapter->pvid || adapter->qnq_vid) &&
981             !qnq_async_evt_rcvd(adapter)))
982                 goto tx_drop;
983
984         /* Manual VLAN tag insertion to prevent:
985          * ASIC lockup when the ASIC inserts VLAN tag into
986          * certain ipv6 packets. Insert VLAN tags in driver,
987          * and set event, completion, vlan bits accordingly
988          * in the Tx WRB.
989          */
990         if (be_ipv6_tx_stall_chk(adapter, skb) &&
991             be_vlan_tag_tx_chk(adapter, skb)) {
992                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
993                 if (unlikely(!skb))
994                         goto err;
995         }
996
997         return skb;
998 tx_drop:
999         dev_kfree_skb_any(skb);
1000 err:
1001         return NULL;
1002 }
1003
1004 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1005                                            struct sk_buff *skb,
1006                                            bool *skip_hw_vlan)
1007 {
1008         /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1009          * less may cause a transmit stall on that port. So the work-around is
1010          * to pad short packets (<= 32 bytes) to a 36-byte length.
1011          */
1012         if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1013                 if (skb_padto(skb, 36))
1014                         return NULL;
1015                 skb->len = 36;
1016         }
1017
1018         if (BEx_chip(adapter) || lancer_chip(adapter)) {
1019                 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1020                 if (!skb)
1021                         return NULL;
1022         }
1023
1024         return skb;
1025 }
1026
1027 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1028 {
1029         struct be_adapter *adapter = netdev_priv(netdev);
1030         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1031         struct be_queue_info *txq = &txo->q;
1032         bool dummy_wrb, stopped = false;
1033         u32 wrb_cnt = 0, copied = 0;
1034         bool skip_hw_vlan = false;
1035         u32 start = txq->head;
1036
1037         skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1038         if (!skb) {
1039                 tx_stats(txo)->tx_drv_drops++;
1040                 return NETDEV_TX_OK;
1041         }
1042
1043         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1044
1045         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1046                               skip_hw_vlan);
1047         if (copied) {
1048                 int gso_segs = skb_shinfo(skb)->gso_segs;
1049
1050                 /* record the sent skb in the sent_skb table */
1051                 BUG_ON(txo->sent_skb_list[start]);
1052                 txo->sent_skb_list[start] = skb;
1053
1054                 /* Ensure txq has space for the next skb; Else stop the queue
1055                  * *BEFORE* ringing the tx doorbell, so that we serialze the
1056                  * tx compls of the current transmit which'll wake up the queue
1057                  */
1058                 atomic_add(wrb_cnt, &txq->used);
1059                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1060                                                                 txq->len) {
1061                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1062                         stopped = true;
1063                 }
1064
1065                 be_txq_notify(adapter, txo, wrb_cnt);
1066
1067                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1068         } else {
1069                 txq->head = start;
1070                 tx_stats(txo)->tx_drv_drops++;
1071                 dev_kfree_skb_any(skb);
1072         }
1073         return NETDEV_TX_OK;
1074 }
1075
1076 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1077 {
1078         struct be_adapter *adapter = netdev_priv(netdev);
1079         if (new_mtu < BE_MIN_MTU ||
1080                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1081                                         (ETH_HLEN + ETH_FCS_LEN))) {
1082                 dev_info(&adapter->pdev->dev,
1083                         "MTU must be between %d and %d bytes\n",
1084                         BE_MIN_MTU,
1085                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1086                 return -EINVAL;
1087         }
1088         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1089                         netdev->mtu, new_mtu);
1090         netdev->mtu = new_mtu;
1091         return 0;
1092 }
1093
1094 /*
1095  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1096  * If the user configures more, place BE in vlan promiscuous mode.
1097  */
1098 static int be_vid_config(struct be_adapter *adapter)
1099 {
1100         u16 vids[BE_NUM_VLANS_SUPPORTED];
1101         u16 num = 0, i;
1102         int status = 0;
1103
1104         /* No need to further configure vids if in promiscuous mode */
1105         if (adapter->promiscuous)
1106                 return 0;
1107
1108         if (adapter->vlans_added > be_max_vlans(adapter))
1109                 goto set_vlan_promisc;
1110
1111         /* Construct VLAN Table to give to HW */
1112         for (i = 0; i < VLAN_N_VID; i++)
1113                 if (adapter->vlan_tag[i])
1114                         vids[num++] = cpu_to_le16(i);
1115
1116         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1117                                     vids, num, 0);
1118
1119         if (status) {
1120                 /* Set to VLAN promisc mode as setting VLAN filter failed */
1121                 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1122                         goto set_vlan_promisc;
1123                 dev_err(&adapter->pdev->dev,
1124                         "Setting HW VLAN filtering failed.\n");
1125         } else {
1126                 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1127                         /* hw VLAN filtering re-enabled. */
1128                         status = be_cmd_rx_filter(adapter,
1129                                                   BE_FLAGS_VLAN_PROMISC, OFF);
1130                         if (!status) {
1131                                 dev_info(&adapter->pdev->dev,
1132                                          "Disabling VLAN Promiscuous mode.\n");
1133                                 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1134                         }
1135                 }
1136         }
1137
1138         return status;
1139
1140 set_vlan_promisc:
1141         if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1142                 return 0;
1143
1144         status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1145         if (!status) {
1146                 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1147                 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1148         } else
1149                 dev_err(&adapter->pdev->dev,
1150                         "Failed to enable VLAN Promiscuous mode.\n");
1151         return status;
1152 }
1153
1154 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1155 {
1156         struct be_adapter *adapter = netdev_priv(netdev);
1157         int status = 0;
1158
1159         /* Packets with VID 0 are always received by Lancer by default */
1160         if (lancer_chip(adapter) && vid == 0)
1161                 return status;
1162
1163         if (adapter->vlan_tag[vid])
1164                 return status;
1165
1166         adapter->vlan_tag[vid] = 1;
1167         adapter->vlans_added++;
1168
1169         status = be_vid_config(adapter);
1170         if (status) {
1171                 adapter->vlans_added--;
1172                 adapter->vlan_tag[vid] = 0;
1173         }
1174
1175         return status;
1176 }
1177
1178 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1179 {
1180         struct be_adapter *adapter = netdev_priv(netdev);
1181         int status = 0;
1182
1183         /* Packets with VID 0 are always received by Lancer by default */
1184         if (lancer_chip(adapter) && vid == 0)
1185                 goto ret;
1186
1187         adapter->vlan_tag[vid] = 0;
1188         status = be_vid_config(adapter);
1189         if (!status)
1190                 adapter->vlans_added--;
1191         else
1192                 adapter->vlan_tag[vid] = 1;
1193 ret:
1194         return status;
1195 }
1196
1197 static void be_clear_promisc(struct be_adapter *adapter)
1198 {
1199         adapter->promiscuous = false;
1200         adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1201
1202         be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203 }
1204
1205 static void be_set_rx_mode(struct net_device *netdev)
1206 {
1207         struct be_adapter *adapter = netdev_priv(netdev);
1208         int status;
1209
1210         if (netdev->flags & IFF_PROMISC) {
1211                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1212                 adapter->promiscuous = true;
1213                 goto done;
1214         }
1215
1216         /* BE was previously in promiscuous mode; disable it */
1217         if (adapter->promiscuous) {
1218                 be_clear_promisc(adapter);
1219                 if (adapter->vlans_added)
1220                         be_vid_config(adapter);
1221         }
1222
1223         /* Enable multicast promisc if num configured exceeds what we support */
1224         if (netdev->flags & IFF_ALLMULTI ||
1225             netdev_mc_count(netdev) > be_max_mc(adapter)) {
1226                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1227                 goto done;
1228         }
1229
1230         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1231                 struct netdev_hw_addr *ha;
1232                 int i = 1; /* First slot is claimed by the Primary MAC */
1233
1234                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1235                         be_cmd_pmac_del(adapter, adapter->if_handle,
1236                                         adapter->pmac_id[i], 0);
1237                 }
1238
1239                 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1240                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1241                         adapter->promiscuous = true;
1242                         goto done;
1243                 }
1244
1245                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1246                         adapter->uc_macs++; /* First slot is for Primary MAC */
1247                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1248                                         adapter->if_handle,
1249                                         &adapter->pmac_id[adapter->uc_macs], 0);
1250                 }
1251         }
1252
1253         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1254
1255         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1256         if (status) {
1257                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1258                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1259                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1260         }
1261 done:
1262         return;
1263 }
1264
1265 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1266 {
1267         struct be_adapter *adapter = netdev_priv(netdev);
1268         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1269         int status;
1270
1271         if (!sriov_enabled(adapter))
1272                 return -EPERM;
1273
1274         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1275                 return -EINVAL;
1276
1277         if (BEx_chip(adapter)) {
1278                 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1279                                 vf + 1);
1280
1281                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1282                                          &vf_cfg->pmac_id, vf + 1);
1283         } else {
1284                 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1285                                         vf + 1);
1286         }
1287
1288         if (status)
1289                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1290                                 mac, vf);
1291         else
1292                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1293
1294         return status;
1295 }
1296
1297 static int be_get_vf_config(struct net_device *netdev, int vf,
1298                         struct ifla_vf_info *vi)
1299 {
1300         struct be_adapter *adapter = netdev_priv(netdev);
1301         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1302
1303         if (!sriov_enabled(adapter))
1304                 return -EPERM;
1305
1306         if (vf >= adapter->num_vfs)
1307                 return -EINVAL;
1308
1309         vi->vf = vf;
1310         vi->tx_rate = vf_cfg->tx_rate;
1311         vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1312         vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1313         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1314         vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1315
1316         return 0;
1317 }
1318
1319 static int be_set_vf_vlan(struct net_device *netdev,
1320                         int vf, u16 vlan, u8 qos)
1321 {
1322         struct be_adapter *adapter = netdev_priv(netdev);
1323         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1324         int status = 0;
1325
1326         if (!sriov_enabled(adapter))
1327                 return -EPERM;
1328
1329         if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1330                 return -EINVAL;
1331
1332         if (vlan || qos) {
1333                 vlan |= qos << VLAN_PRIO_SHIFT;
1334                 if (vf_cfg->vlan_tag != vlan)
1335                         status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1336                                                        vf_cfg->if_handle, 0);
1337         } else {
1338                 /* Reset Transparent Vlan Tagging. */
1339                 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1340                                                vf + 1, vf_cfg->if_handle, 0);
1341         }
1342
1343         if (!status)
1344                 vf_cfg->vlan_tag = vlan;
1345         else
1346                 dev_info(&adapter->pdev->dev,
1347                          "VLAN %d config on VF %d failed\n", vlan, vf);
1348         return status;
1349 }
1350
1351 static int be_set_vf_tx_rate(struct net_device *netdev,
1352                         int vf, int rate)
1353 {
1354         struct be_adapter *adapter = netdev_priv(netdev);
1355         int status = 0;
1356
1357         if (!sriov_enabled(adapter))
1358                 return -EPERM;
1359
1360         if (vf >= adapter->num_vfs)
1361                 return -EINVAL;
1362
1363         if (rate < 100 || rate > 10000) {
1364                 dev_err(&adapter->pdev->dev,
1365                         "tx rate must be between 100 and 10000 Mbps\n");
1366                 return -EINVAL;
1367         }
1368
1369         status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
1370         if (status)
1371                 dev_err(&adapter->pdev->dev,
1372                                 "tx rate %d on VF %d failed\n", rate, vf);
1373         else
1374                 adapter->vf_cfg[vf].tx_rate = rate;
1375         return status;
1376 }
1377 static int be_set_vf_link_state(struct net_device *netdev, int vf,
1378                                 int link_state)
1379 {
1380         struct be_adapter *adapter = netdev_priv(netdev);
1381         int status;
1382
1383         if (!sriov_enabled(adapter))
1384                 return -EPERM;
1385
1386         if (vf >= adapter->num_vfs)
1387                 return -EINVAL;
1388
1389         status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1390         if (!status)
1391                 adapter->vf_cfg[vf].plink_tracking = link_state;
1392
1393         return status;
1394 }
1395
1396 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1397                           ulong now)
1398 {
1399         aic->rx_pkts_prev = rx_pkts;
1400         aic->tx_reqs_prev = tx_pkts;
1401         aic->jiffies = now;
1402 }
1403
1404 static void be_eqd_update(struct be_adapter *adapter)
1405 {
1406         struct be_set_eqd set_eqd[MAX_EVT_QS];
1407         int eqd, i, num = 0, start;
1408         struct be_aic_obj *aic;
1409         struct be_eq_obj *eqo;
1410         struct be_rx_obj *rxo;
1411         struct be_tx_obj *txo;
1412         u64 rx_pkts, tx_pkts;
1413         ulong now;
1414         u32 pps, delta;
1415
1416         for_all_evt_queues(adapter, eqo, i) {
1417                 aic = &adapter->aic_obj[eqo->idx];
1418                 if (!aic->enable) {
1419                         if (aic->jiffies)
1420                                 aic->jiffies = 0;
1421                         eqd = aic->et_eqd;
1422                         goto modify_eqd;
1423                 }
1424
1425                 rxo = &adapter->rx_obj[eqo->idx];
1426                 do {
1427                         start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1428                         rx_pkts = rxo->stats.rx_pkts;
1429                 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1430
1431                 txo = &adapter->tx_obj[eqo->idx];
1432                 do {
1433                         start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1434                         tx_pkts = txo->stats.tx_reqs;
1435                 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1436
1437
1438                 /* Skip, if wrapped around or first calculation */
1439                 now = jiffies;
1440                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1441                     rx_pkts < aic->rx_pkts_prev ||
1442                     tx_pkts < aic->tx_reqs_prev) {
1443                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1444                         continue;
1445                 }
1446
1447                 delta = jiffies_to_msecs(now - aic->jiffies);
1448                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1449                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1450                 eqd = (pps / 15000) << 2;
1451
1452                 if (eqd < 8)
1453                         eqd = 0;
1454                 eqd = min_t(u32, eqd, aic->max_eqd);
1455                 eqd = max_t(u32, eqd, aic->min_eqd);
1456
1457                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1458 modify_eqd:
1459                 if (eqd != aic->prev_eqd) {
1460                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1461                         set_eqd[num].eq_id = eqo->q.id;
1462                         aic->prev_eqd = eqd;
1463                         num++;
1464                 }
1465         }
1466
1467         if (num)
1468                 be_cmd_modify_eqd(adapter, set_eqd, num);
1469 }
1470
1471 static void be_rx_stats_update(struct be_rx_obj *rxo,
1472                 struct be_rx_compl_info *rxcp)
1473 {
1474         struct be_rx_stats *stats = rx_stats(rxo);
1475
1476         u64_stats_update_begin(&stats->sync);
1477         stats->rx_compl++;
1478         stats->rx_bytes += rxcp->pkt_size;
1479         stats->rx_pkts++;
1480         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1481                 stats->rx_mcast_pkts++;
1482         if (rxcp->err)
1483                 stats->rx_compl_err++;
1484         u64_stats_update_end(&stats->sync);
1485 }
1486
1487 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1488 {
1489         /* L4 checksum is not reliable for non TCP/UDP packets.
1490          * Also ignore ipcksm for ipv6 pkts
1491          */
1492         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1493                 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1494 }
1495
1496 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1497 {
1498         struct be_adapter *adapter = rxo->adapter;
1499         struct be_rx_page_info *rx_page_info;
1500         struct be_queue_info *rxq = &rxo->q;
1501         u16 frag_idx = rxq->tail;
1502
1503         rx_page_info = &rxo->page_info_tbl[frag_idx];
1504         BUG_ON(!rx_page_info->page);
1505
1506         if (rx_page_info->last_frag) {
1507                 dma_unmap_page(&adapter->pdev->dev,
1508                                dma_unmap_addr(rx_page_info, bus),
1509                                adapter->big_page_size, DMA_FROM_DEVICE);
1510                 rx_page_info->last_frag = false;
1511         } else {
1512                 dma_sync_single_for_cpu(&adapter->pdev->dev,
1513                                         dma_unmap_addr(rx_page_info, bus),
1514                                         rx_frag_size, DMA_FROM_DEVICE);
1515         }
1516
1517         queue_tail_inc(rxq);
1518         atomic_dec(&rxq->used);
1519         return rx_page_info;
1520 }
1521
1522 /* Throwaway the data in the Rx completion */
1523 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1524                                 struct be_rx_compl_info *rxcp)
1525 {
1526         struct be_rx_page_info *page_info;
1527         u16 i, num_rcvd = rxcp->num_rcvd;
1528
1529         for (i = 0; i < num_rcvd; i++) {
1530                 page_info = get_rx_page_info(rxo);
1531                 put_page(page_info->page);
1532                 memset(page_info, 0, sizeof(*page_info));
1533         }
1534 }
1535
1536 /*
1537  * skb_fill_rx_data forms a complete skb for an ether frame
1538  * indicated by rxcp.
1539  */
1540 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1541                              struct be_rx_compl_info *rxcp)
1542 {
1543         struct be_rx_page_info *page_info;
1544         u16 i, j;
1545         u16 hdr_len, curr_frag_len, remaining;
1546         u8 *start;
1547
1548         page_info = get_rx_page_info(rxo);
1549         start = page_address(page_info->page) + page_info->page_offset;
1550         prefetch(start);
1551
1552         /* Copy data in the first descriptor of this completion */
1553         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1554
1555         skb->len = curr_frag_len;
1556         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1557                 memcpy(skb->data, start, curr_frag_len);
1558                 /* Complete packet has now been moved to data */
1559                 put_page(page_info->page);
1560                 skb->data_len = 0;
1561                 skb->tail += curr_frag_len;
1562         } else {
1563                 hdr_len = ETH_HLEN;
1564                 memcpy(skb->data, start, hdr_len);
1565                 skb_shinfo(skb)->nr_frags = 1;
1566                 skb_frag_set_page(skb, 0, page_info->page);
1567                 skb_shinfo(skb)->frags[0].page_offset =
1568                                         page_info->page_offset + hdr_len;
1569                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1570                 skb->data_len = curr_frag_len - hdr_len;
1571                 skb->truesize += rx_frag_size;
1572                 skb->tail += hdr_len;
1573         }
1574         page_info->page = NULL;
1575
1576         if (rxcp->pkt_size <= rx_frag_size) {
1577                 BUG_ON(rxcp->num_rcvd != 1);
1578                 return;
1579         }
1580
1581         /* More frags present for this completion */
1582         remaining = rxcp->pkt_size - curr_frag_len;
1583         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1584                 page_info = get_rx_page_info(rxo);
1585                 curr_frag_len = min(remaining, rx_frag_size);
1586
1587                 /* Coalesce all frags from the same physical page in one slot */
1588                 if (page_info->page_offset == 0) {
1589                         /* Fresh page */
1590                         j++;
1591                         skb_frag_set_page(skb, j, page_info->page);
1592                         skb_shinfo(skb)->frags[j].page_offset =
1593                                                         page_info->page_offset;
1594                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1595                         skb_shinfo(skb)->nr_frags++;
1596                 } else {
1597                         put_page(page_info->page);
1598                 }
1599
1600                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1601                 skb->len += curr_frag_len;
1602                 skb->data_len += curr_frag_len;
1603                 skb->truesize += rx_frag_size;
1604                 remaining -= curr_frag_len;
1605                 page_info->page = NULL;
1606         }
1607         BUG_ON(j > MAX_SKB_FRAGS);
1608 }
1609
1610 /* Process the RX completion indicated by rxcp when GRO is disabled */
1611 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1612                                 struct be_rx_compl_info *rxcp)
1613 {
1614         struct be_adapter *adapter = rxo->adapter;
1615         struct net_device *netdev = adapter->netdev;
1616         struct sk_buff *skb;
1617
1618         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1619         if (unlikely(!skb)) {
1620                 rx_stats(rxo)->rx_drops_no_skbs++;
1621                 be_rx_compl_discard(rxo, rxcp);
1622                 return;
1623         }
1624
1625         skb_fill_rx_data(rxo, skb, rxcp);
1626
1627         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1628                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629         else
1630                 skb_checksum_none_assert(skb);
1631
1632         skb->protocol = eth_type_trans(skb, netdev);
1633         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1634         if (netdev->features & NETIF_F_RXHASH)
1635                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1636
1637         skb->encapsulation = rxcp->tunneled;
1638         skb_mark_napi_id(skb, napi);
1639
1640         if (rxcp->vlanf)
1641                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1642
1643         netif_receive_skb(skb);
1644 }
1645
1646 /* Process the RX completion indicated by rxcp when GRO is enabled */
1647 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1648                                     struct napi_struct *napi,
1649                                     struct be_rx_compl_info *rxcp)
1650 {
1651         struct be_adapter *adapter = rxo->adapter;
1652         struct be_rx_page_info *page_info;
1653         struct sk_buff *skb = NULL;
1654         u16 remaining, curr_frag_len;
1655         u16 i, j;
1656
1657         skb = napi_get_frags(napi);
1658         if (!skb) {
1659                 be_rx_compl_discard(rxo, rxcp);
1660                 return;
1661         }
1662
1663         remaining = rxcp->pkt_size;
1664         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1665                 page_info = get_rx_page_info(rxo);
1666
1667                 curr_frag_len = min(remaining, rx_frag_size);
1668
1669                 /* Coalesce all frags from the same physical page in one slot */
1670                 if (i == 0 || page_info->page_offset == 0) {
1671                         /* First frag or Fresh page */
1672                         j++;
1673                         skb_frag_set_page(skb, j, page_info->page);
1674                         skb_shinfo(skb)->frags[j].page_offset =
1675                                                         page_info->page_offset;
1676                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1677                 } else {
1678                         put_page(page_info->page);
1679                 }
1680                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1681                 skb->truesize += rx_frag_size;
1682                 remaining -= curr_frag_len;
1683                 memset(page_info, 0, sizeof(*page_info));
1684         }
1685         BUG_ON(j > MAX_SKB_FRAGS);
1686
1687         skb_shinfo(skb)->nr_frags = j + 1;
1688         skb->len = rxcp->pkt_size;
1689         skb->data_len = rxcp->pkt_size;
1690         skb->ip_summed = CHECKSUM_UNNECESSARY;
1691         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1692         if (adapter->netdev->features & NETIF_F_RXHASH)
1693                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1694
1695         skb->encapsulation = rxcp->tunneled;
1696         skb_mark_napi_id(skb, napi);
1697
1698         if (rxcp->vlanf)
1699                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1700
1701         napi_gro_frags(napi);
1702 }
1703
1704 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1705                                  struct be_rx_compl_info *rxcp)
1706 {
1707         rxcp->pkt_size =
1708                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1709         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1710         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1711         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1712         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1713         rxcp->ip_csum =
1714                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1715         rxcp->l4_csum =
1716                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1717         rxcp->ipv6 =
1718                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1719         rxcp->num_rcvd =
1720                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1721         rxcp->pkt_type =
1722                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1723         rxcp->rss_hash =
1724                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1725         if (rxcp->vlanf) {
1726                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1727                                           compl);
1728                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1729                                                compl);
1730         }
1731         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732         rxcp->tunneled =
1733                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
1734 }
1735
1736 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1737                                  struct be_rx_compl_info *rxcp)
1738 {
1739         rxcp->pkt_size =
1740                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1741         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1742         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1743         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1744         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1745         rxcp->ip_csum =
1746                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1747         rxcp->l4_csum =
1748                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1749         rxcp->ipv6 =
1750                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1751         rxcp->num_rcvd =
1752                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1753         rxcp->pkt_type =
1754                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1755         rxcp->rss_hash =
1756                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1757         if (rxcp->vlanf) {
1758                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1759                                           compl);
1760                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1761                                                compl);
1762         }
1763         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1764         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1765                                       ip_frag, compl);
1766 }
1767
1768 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1769 {
1770         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1771         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1772         struct be_adapter *adapter = rxo->adapter;
1773
1774         /* For checking the valid bit it is Ok to use either definition as the
1775          * valid bit is at the same position in both v0 and v1 Rx compl */
1776         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1777                 return NULL;
1778
1779         rmb();
1780         be_dws_le_to_cpu(compl, sizeof(*compl));
1781
1782         if (adapter->be3_native)
1783                 be_parse_rx_compl_v1(compl, rxcp);
1784         else
1785                 be_parse_rx_compl_v0(compl, rxcp);
1786
1787         if (rxcp->ip_frag)
1788                 rxcp->l4_csum = 0;
1789
1790         if (rxcp->vlanf) {
1791                 /* In QNQ modes, if qnq bit is not set, then the packet was
1792                  * tagged only with the transparent outer vlan-tag and must
1793                  * not be treated as a vlan packet by host
1794                  */
1795                 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1796                         rxcp->vlanf = 0;
1797
1798                 if (!lancer_chip(adapter))
1799                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1800
1801                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1802                     !adapter->vlan_tag[rxcp->vlan_tag])
1803                         rxcp->vlanf = 0;
1804         }
1805
1806         /* As the compl has been parsed, reset it; we wont touch it again */
1807         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1808
1809         queue_tail_inc(&rxo->cq);
1810         return rxcp;
1811 }
1812
1813 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1814 {
1815         u32 order = get_order(size);
1816
1817         if (order > 0)
1818                 gfp |= __GFP_COMP;
1819         return  alloc_pages(gfp, order);
1820 }
1821
1822 /*
1823  * Allocate a page, split it to fragments of size rx_frag_size and post as
1824  * receive buffers to BE
1825  */
1826 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1827 {
1828         struct be_adapter *adapter = rxo->adapter;
1829         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1830         struct be_queue_info *rxq = &rxo->q;
1831         struct page *pagep = NULL;
1832         struct device *dev = &adapter->pdev->dev;
1833         struct be_eth_rx_d *rxd;
1834         u64 page_dmaaddr = 0, frag_dmaaddr;
1835         u32 posted, page_offset = 0;
1836
1837         page_info = &rxo->page_info_tbl[rxq->head];
1838         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1839                 if (!pagep) {
1840                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1841                         if (unlikely(!pagep)) {
1842                                 rx_stats(rxo)->rx_post_fail++;
1843                                 break;
1844                         }
1845                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1846                                                     adapter->big_page_size,
1847                                                     DMA_FROM_DEVICE);
1848                         if (dma_mapping_error(dev, page_dmaaddr)) {
1849                                 put_page(pagep);
1850                                 pagep = NULL;
1851                                 rx_stats(rxo)->rx_post_fail++;
1852                                 break;
1853                         }
1854                         page_offset = 0;
1855                 } else {
1856                         get_page(pagep);
1857                         page_offset += rx_frag_size;
1858                 }
1859                 page_info->page_offset = page_offset;
1860                 page_info->page = pagep;
1861
1862                 rxd = queue_head_node(rxq);
1863                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1864                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1865                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1866
1867                 /* Any space left in the current big page for another frag? */
1868                 if ((page_offset + rx_frag_size + rx_frag_size) >
1869                                         adapter->big_page_size) {
1870                         pagep = NULL;
1871                         page_info->last_frag = true;
1872                         dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1873                 } else {
1874                         dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1875                 }
1876
1877                 prev_page_info = page_info;
1878                 queue_head_inc(rxq);
1879                 page_info = &rxo->page_info_tbl[rxq->head];
1880         }
1881
1882         /* Mark the last frag of a page when we break out of the above loop
1883          * with no more slots available in the RXQ
1884          */
1885         if (pagep) {
1886                 prev_page_info->last_frag = true;
1887                 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1888         }
1889
1890         if (posted) {
1891                 atomic_add(posted, &rxq->used);
1892                 if (rxo->rx_post_starved)
1893                         rxo->rx_post_starved = false;
1894                 be_rxq_notify(adapter, rxq->id, posted);
1895         } else if (atomic_read(&rxq->used) == 0) {
1896                 /* Let be_worker replenish when memory is available */
1897                 rxo->rx_post_starved = true;
1898         }
1899 }
1900
1901 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1902 {
1903         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1904
1905         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1906                 return NULL;
1907
1908         rmb();
1909         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1910
1911         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1912
1913         queue_tail_inc(tx_cq);
1914         return txcp;
1915 }
1916
1917 static u16 be_tx_compl_process(struct be_adapter *adapter,
1918                 struct be_tx_obj *txo, u16 last_index)
1919 {
1920         struct be_queue_info *txq = &txo->q;
1921         struct be_eth_wrb *wrb;
1922         struct sk_buff **sent_skbs = txo->sent_skb_list;
1923         struct sk_buff *sent_skb;
1924         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1925         bool unmap_skb_hdr = true;
1926
1927         sent_skb = sent_skbs[txq->tail];
1928         BUG_ON(!sent_skb);
1929         sent_skbs[txq->tail] = NULL;
1930
1931         /* skip header wrb */
1932         queue_tail_inc(txq);
1933
1934         do {
1935                 cur_index = txq->tail;
1936                 wrb = queue_tail_node(txq);
1937                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1938                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1939                 unmap_skb_hdr = false;
1940
1941                 num_wrbs++;
1942                 queue_tail_inc(txq);
1943         } while (cur_index != last_index);
1944
1945         dev_kfree_skb_any(sent_skb);
1946         return num_wrbs;
1947 }
1948
1949 /* Return the number of events in the event queue */
1950 static inline int events_get(struct be_eq_obj *eqo)
1951 {
1952         struct be_eq_entry *eqe;
1953         int num = 0;
1954
1955         do {
1956                 eqe = queue_tail_node(&eqo->q);
1957                 if (eqe->evt == 0)
1958                         break;
1959
1960                 rmb();
1961                 eqe->evt = 0;
1962                 num++;
1963                 queue_tail_inc(&eqo->q);
1964         } while (true);
1965
1966         return num;
1967 }
1968
1969 /* Leaves the EQ is disarmed state */
1970 static void be_eq_clean(struct be_eq_obj *eqo)
1971 {
1972         int num = events_get(eqo);
1973
1974         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1975 }
1976
1977 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1978 {
1979         struct be_rx_page_info *page_info;
1980         struct be_queue_info *rxq = &rxo->q;
1981         struct be_queue_info *rx_cq = &rxo->cq;
1982         struct be_rx_compl_info *rxcp;
1983         struct be_adapter *adapter = rxo->adapter;
1984         int flush_wait = 0;
1985
1986         /* Consume pending rx completions.
1987          * Wait for the flush completion (identified by zero num_rcvd)
1988          * to arrive. Notify CQ even when there are no more CQ entries
1989          * for HW to flush partially coalesced CQ entries.
1990          * In Lancer, there is no need to wait for flush compl.
1991          */
1992         for (;;) {
1993                 rxcp = be_rx_compl_get(rxo);
1994                 if (rxcp == NULL) {
1995                         if (lancer_chip(adapter))
1996                                 break;
1997
1998                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1999                                 dev_warn(&adapter->pdev->dev,
2000                                          "did not receive flush compl\n");
2001                                 break;
2002                         }
2003                         be_cq_notify(adapter, rx_cq->id, true, 0);
2004                         mdelay(1);
2005                 } else {
2006                         be_rx_compl_discard(rxo, rxcp);
2007                         be_cq_notify(adapter, rx_cq->id, false, 1);
2008                         if (rxcp->num_rcvd == 0)
2009                                 break;
2010                 }
2011         }
2012
2013         /* After cleanup, leave the CQ in unarmed state */
2014         be_cq_notify(adapter, rx_cq->id, false, 0);
2015
2016         /* Then free posted rx buffers that were not used */
2017         while (atomic_read(&rxq->used) > 0) {
2018                 page_info = get_rx_page_info(rxo);
2019                 put_page(page_info->page);
2020                 memset(page_info, 0, sizeof(*page_info));
2021         }
2022         BUG_ON(atomic_read(&rxq->used));
2023         rxq->tail = rxq->head = 0;
2024 }
2025
2026 static void be_tx_compl_clean(struct be_adapter *adapter)
2027 {
2028         struct be_tx_obj *txo;
2029         struct be_queue_info *txq;
2030         struct be_eth_tx_compl *txcp;
2031         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2032         struct sk_buff *sent_skb;
2033         bool dummy_wrb;
2034         int i, pending_txqs;
2035
2036         /* Wait for a max of 200ms for all the tx-completions to arrive. */
2037         do {
2038                 pending_txqs = adapter->num_tx_qs;
2039
2040                 for_all_tx_queues(adapter, txo, i) {
2041                         txq = &txo->q;
2042                         while ((txcp = be_tx_compl_get(&txo->cq))) {
2043                                 end_idx =
2044                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
2045                                                       wrb_index, txcp);
2046                                 num_wrbs += be_tx_compl_process(adapter, txo,
2047                                                                 end_idx);
2048                                 cmpl++;
2049                         }
2050                         if (cmpl) {
2051                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2052                                 atomic_sub(num_wrbs, &txq->used);
2053                                 cmpl = 0;
2054                                 num_wrbs = 0;
2055                         }
2056                         if (atomic_read(&txq->used) == 0)
2057                                 pending_txqs--;
2058                 }
2059
2060                 if (pending_txqs == 0 || ++timeo > 200)
2061                         break;
2062
2063                 mdelay(1);
2064         } while (true);
2065
2066         for_all_tx_queues(adapter, txo, i) {
2067                 txq = &txo->q;
2068                 if (atomic_read(&txq->used))
2069                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2070                                 atomic_read(&txq->used));
2071
2072                 /* free posted tx for which compls will never arrive */
2073                 while (atomic_read(&txq->used)) {
2074                         sent_skb = txo->sent_skb_list[txq->tail];
2075                         end_idx = txq->tail;
2076                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2077                                                    &dummy_wrb);
2078                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2079                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2080                         atomic_sub(num_wrbs, &txq->used);
2081                 }
2082         }
2083 }
2084
2085 static void be_evt_queues_destroy(struct be_adapter *adapter)
2086 {
2087         struct be_eq_obj *eqo;
2088         int i;
2089
2090         for_all_evt_queues(adapter, eqo, i) {
2091                 if (eqo->q.created) {
2092                         be_eq_clean(eqo);
2093                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2094                         napi_hash_del(&eqo->napi);
2095                         netif_napi_del(&eqo->napi);
2096                 }
2097                 be_queue_free(adapter, &eqo->q);
2098         }
2099 }
2100
2101 static int be_evt_queues_create(struct be_adapter *adapter)
2102 {
2103         struct be_queue_info *eq;
2104         struct be_eq_obj *eqo;
2105         struct be_aic_obj *aic;
2106         int i, rc;
2107
2108         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2109                                     adapter->cfg_num_qs);
2110
2111         for_all_evt_queues(adapter, eqo, i) {
2112                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2113                                BE_NAPI_WEIGHT);
2114                 napi_hash_add(&eqo->napi);
2115                 aic = &adapter->aic_obj[i];
2116                 eqo->adapter = adapter;
2117                 eqo->tx_budget = BE_TX_BUDGET;
2118                 eqo->idx = i;
2119                 aic->max_eqd = BE_MAX_EQD;
2120                 aic->enable = true;
2121
2122                 eq = &eqo->q;
2123                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2124                                         sizeof(struct be_eq_entry));
2125                 if (rc)
2126                         return rc;
2127
2128                 rc = be_cmd_eq_create(adapter, eqo);
2129                 if (rc)
2130                         return rc;
2131         }
2132         return 0;
2133 }
2134
2135 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2136 {
2137         struct be_queue_info *q;
2138
2139         q = &adapter->mcc_obj.q;
2140         if (q->created)
2141                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2142         be_queue_free(adapter, q);
2143
2144         q = &adapter->mcc_obj.cq;
2145         if (q->created)
2146                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2147         be_queue_free(adapter, q);
2148 }
2149
2150 /* Must be called only after TX qs are created as MCC shares TX EQ */
2151 static int be_mcc_queues_create(struct be_adapter *adapter)
2152 {
2153         struct be_queue_info *q, *cq;
2154
2155         cq = &adapter->mcc_obj.cq;
2156         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2157                         sizeof(struct be_mcc_compl)))
2158                 goto err;
2159
2160         /* Use the default EQ for MCC completions */
2161         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2162                 goto mcc_cq_free;
2163
2164         q = &adapter->mcc_obj.q;
2165         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2166                 goto mcc_cq_destroy;
2167
2168         if (be_cmd_mccq_create(adapter, q, cq))
2169                 goto mcc_q_free;
2170
2171         return 0;
2172
2173 mcc_q_free:
2174         be_queue_free(adapter, q);
2175 mcc_cq_destroy:
2176         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2177 mcc_cq_free:
2178         be_queue_free(adapter, cq);
2179 err:
2180         return -1;
2181 }
2182
2183 static void be_tx_queues_destroy(struct be_adapter *adapter)
2184 {
2185         struct be_queue_info *q;
2186         struct be_tx_obj *txo;
2187         u8 i;
2188
2189         for_all_tx_queues(adapter, txo, i) {
2190                 q = &txo->q;
2191                 if (q->created)
2192                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2193                 be_queue_free(adapter, q);
2194
2195                 q = &txo->cq;
2196                 if (q->created)
2197                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2198                 be_queue_free(adapter, q);
2199         }
2200 }
2201
2202 static int be_tx_qs_create(struct be_adapter *adapter)
2203 {
2204         struct be_queue_info *cq, *eq;
2205         struct be_tx_obj *txo;
2206         int status, i;
2207
2208         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2209
2210         for_all_tx_queues(adapter, txo, i) {
2211                 cq = &txo->cq;
2212                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2213                                         sizeof(struct be_eth_tx_compl));
2214                 if (status)
2215                         return status;
2216
2217                 u64_stats_init(&txo->stats.sync);
2218                 u64_stats_init(&txo->stats.sync_compl);
2219
2220                 /* If num_evt_qs is less than num_tx_qs, then more than
2221                  * one txq share an eq
2222                  */
2223                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2224                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2225                 if (status)
2226                         return status;
2227
2228                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2229                                         sizeof(struct be_eth_wrb));
2230                 if (status)
2231                         return status;
2232
2233                 status = be_cmd_txq_create(adapter, txo);
2234                 if (status)
2235                         return status;
2236         }
2237
2238         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2239                  adapter->num_tx_qs);
2240         return 0;
2241 }
2242
2243 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2244 {
2245         struct be_queue_info *q;
2246         struct be_rx_obj *rxo;
2247         int i;
2248
2249         for_all_rx_queues(adapter, rxo, i) {
2250                 q = &rxo->cq;
2251                 if (q->created)
2252                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2253                 be_queue_free(adapter, q);
2254         }
2255 }
2256
2257 static int be_rx_cqs_create(struct be_adapter *adapter)
2258 {
2259         struct be_queue_info *eq, *cq;
2260         struct be_rx_obj *rxo;
2261         int rc, i;
2262
2263         /* We can create as many RSS rings as there are EQs. */
2264         adapter->num_rx_qs = adapter->num_evt_qs;
2265
2266         /* We'll use RSS only if atleast 2 RSS rings are supported.
2267          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2268          */
2269         if (adapter->num_rx_qs > 1)
2270                 adapter->num_rx_qs++;
2271
2272         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2273         for_all_rx_queues(adapter, rxo, i) {
2274                 rxo->adapter = adapter;
2275                 cq = &rxo->cq;
2276                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2277                                 sizeof(struct be_eth_rx_compl));
2278                 if (rc)
2279                         return rc;
2280
2281                 u64_stats_init(&rxo->stats.sync);
2282                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2283                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2284                 if (rc)
2285                         return rc;
2286         }
2287
2288         dev_info(&adapter->pdev->dev,
2289                  "created %d RSS queue(s) and 1 default RX queue\n",
2290                  adapter->num_rx_qs - 1);
2291         return 0;
2292 }
2293
2294 static irqreturn_t be_intx(int irq, void *dev)
2295 {
2296         struct be_eq_obj *eqo = dev;
2297         struct be_adapter *adapter = eqo->adapter;
2298         int num_evts = 0;
2299
2300         /* IRQ is not expected when NAPI is scheduled as the EQ
2301          * will not be armed.
2302          * But, this can happen on Lancer INTx where it takes
2303          * a while to de-assert INTx or in BE2 where occasionaly
2304          * an interrupt may be raised even when EQ is unarmed.
2305          * If NAPI is already scheduled, then counting & notifying
2306          * events will orphan them.
2307          */
2308         if (napi_schedule_prep(&eqo->napi)) {
2309                 num_evts = events_get(eqo);
2310                 __napi_schedule(&eqo->napi);
2311                 if (num_evts)
2312                         eqo->spurious_intr = 0;
2313         }
2314         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2315
2316         /* Return IRQ_HANDLED only for the the first spurious intr
2317          * after a valid intr to stop the kernel from branding
2318          * this irq as a bad one!
2319          */
2320         if (num_evts || eqo->spurious_intr++ == 0)
2321                 return IRQ_HANDLED;
2322         else
2323                 return IRQ_NONE;
2324 }
2325
2326 static irqreturn_t be_msix(int irq, void *dev)
2327 {
2328         struct be_eq_obj *eqo = dev;
2329
2330         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2331         napi_schedule(&eqo->napi);
2332         return IRQ_HANDLED;
2333 }
2334
2335 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2336 {
2337         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2338 }
2339
2340 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2341                         int budget, int polling)
2342 {
2343         struct be_adapter *adapter = rxo->adapter;
2344         struct be_queue_info *rx_cq = &rxo->cq;
2345         struct be_rx_compl_info *rxcp;
2346         u32 work_done;
2347
2348         for (work_done = 0; work_done < budget; work_done++) {
2349                 rxcp = be_rx_compl_get(rxo);
2350                 if (!rxcp)
2351                         break;
2352
2353                 /* Is it a flush compl that has no data */
2354                 if (unlikely(rxcp->num_rcvd == 0))
2355                         goto loop_continue;
2356
2357                 /* Discard compl with partial DMA Lancer B0 */
2358                 if (unlikely(!rxcp->pkt_size)) {
2359                         be_rx_compl_discard(rxo, rxcp);
2360                         goto loop_continue;
2361                 }
2362
2363                 /* On BE drop pkts that arrive due to imperfect filtering in
2364                  * promiscuous mode on some skews
2365                  */
2366                 if (unlikely(rxcp->port != adapter->port_num &&
2367                                 !lancer_chip(adapter))) {
2368                         be_rx_compl_discard(rxo, rxcp);
2369                         goto loop_continue;
2370                 }
2371
2372                 /* Don't do gro when we're busy_polling */
2373                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2374                         be_rx_compl_process_gro(rxo, napi, rxcp);
2375                 else
2376                         be_rx_compl_process(rxo, napi, rxcp);
2377
2378 loop_continue:
2379                 be_rx_stats_update(rxo, rxcp);
2380         }
2381
2382         if (work_done) {
2383                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2384
2385                 /* When an rx-obj gets into post_starved state, just
2386                  * let be_worker do the posting.
2387                  */
2388                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2389                     !rxo->rx_post_starved)
2390                         be_post_rx_frags(rxo, GFP_ATOMIC);
2391         }
2392
2393         return work_done;
2394 }
2395
2396 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2397                           int budget, int idx)
2398 {
2399         struct be_eth_tx_compl *txcp;
2400         int num_wrbs = 0, work_done;
2401
2402         for (work_done = 0; work_done < budget; work_done++) {
2403                 txcp = be_tx_compl_get(&txo->cq);
2404                 if (!txcp)
2405                         break;
2406                 num_wrbs += be_tx_compl_process(adapter, txo,
2407                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2408                                         wrb_index, txcp));
2409         }
2410
2411         if (work_done) {
2412                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2413                 atomic_sub(num_wrbs, &txo->q.used);
2414
2415                 /* As Tx wrbs have been freed up, wake up netdev queue
2416                  * if it was stopped due to lack of tx wrbs.  */
2417                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2418                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2419                         netif_wake_subqueue(adapter->netdev, idx);
2420                 }
2421
2422                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2423                 tx_stats(txo)->tx_compl += work_done;
2424                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2425         }
2426         return (work_done < budget); /* Done */
2427 }
2428
2429 int be_poll(struct napi_struct *napi, int budget)
2430 {
2431         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2432         struct be_adapter *adapter = eqo->adapter;
2433         int max_work = 0, work, i, num_evts;
2434         struct be_rx_obj *rxo;
2435         bool tx_done;
2436
2437         num_evts = events_get(eqo);
2438
2439         /* Process all TXQs serviced by this EQ */
2440         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2441                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2442                                         eqo->tx_budget, i);
2443                 if (!tx_done)
2444                         max_work = budget;
2445         }
2446
2447         if (be_lock_napi(eqo)) {
2448                 /* This loop will iterate twice for EQ0 in which
2449                  * completions of the last RXQ (default one) are also processed
2450                  * For other EQs the loop iterates only once
2451                  */
2452                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2453                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2454                         max_work = max(work, max_work);
2455                 }
2456                 be_unlock_napi(eqo);
2457         } else {
2458                 max_work = budget;
2459         }
2460
2461         if (is_mcc_eqo(eqo))
2462                 be_process_mcc(adapter);
2463
2464         if (max_work < budget) {
2465                 napi_complete(napi);
2466                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2467         } else {
2468                 /* As we'll continue in polling mode, count and clear events */
2469                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2470         }
2471         return max_work;
2472 }
2473
2474 #ifdef CONFIG_NET_RX_BUSY_POLL
2475 static int be_busy_poll(struct napi_struct *napi)
2476 {
2477         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2478         struct be_adapter *adapter = eqo->adapter;
2479         struct be_rx_obj *rxo;
2480         int i, work = 0;
2481
2482         if (!be_lock_busy_poll(eqo))
2483                 return LL_FLUSH_BUSY;
2484
2485         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2486                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2487                 if (work)
2488                         break;
2489         }
2490
2491         be_unlock_busy_poll(eqo);
2492         return work;
2493 }
2494 #endif
2495
2496 void be_detect_error(struct be_adapter *adapter)
2497 {
2498         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2499         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2500         u32 i;
2501         bool error_detected = false;
2502         struct device *dev = &adapter->pdev->dev;
2503         struct net_device *netdev = adapter->netdev;
2504
2505         if (be_hw_error(adapter))
2506                 return;
2507
2508         if (lancer_chip(adapter)) {
2509                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2510                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2511                         sliport_err1 = ioread32(adapter->db +
2512                                         SLIPORT_ERROR1_OFFSET);
2513                         sliport_err2 = ioread32(adapter->db +
2514                                         SLIPORT_ERROR2_OFFSET);
2515                         adapter->hw_error = true;
2516                         /* Do not log error messages if its a FW reset */
2517                         if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2518                             sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2519                                 dev_info(dev, "Firmware update in progress\n");
2520                         } else {
2521                                 error_detected = true;
2522                                 dev_err(dev, "Error detected in the card\n");
2523                                 dev_err(dev, "ERR: sliport status 0x%x\n",
2524                                         sliport_status);
2525                                 dev_err(dev, "ERR: sliport error1 0x%x\n",
2526                                         sliport_err1);
2527                                 dev_err(dev, "ERR: sliport error2 0x%x\n",
2528                                         sliport_err2);
2529                         }
2530                 }
2531         } else {
2532                 pci_read_config_dword(adapter->pdev,
2533                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2534                 pci_read_config_dword(adapter->pdev,
2535                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2536                 pci_read_config_dword(adapter->pdev,
2537                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2538                 pci_read_config_dword(adapter->pdev,
2539                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2540
2541                 ue_lo = (ue_lo & ~ue_lo_mask);
2542                 ue_hi = (ue_hi & ~ue_hi_mask);
2543
2544                 /* On certain platforms BE hardware can indicate spurious UEs.
2545                  * Allow HW to stop working completely in case of a real UE.
2546                  * Hence not setting the hw_error for UE detection.
2547                  */
2548
2549                 if (ue_lo || ue_hi) {
2550                         error_detected = true;
2551                         dev_err(dev,
2552                                 "Unrecoverable Error detected in the adapter");
2553                         dev_err(dev, "Please reboot server to recover");
2554                         if (skyhawk_chip(adapter))
2555                                 adapter->hw_error = true;
2556                         for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2557                                 if (ue_lo & 1)
2558                                         dev_err(dev, "UE: %s bit set\n",
2559                                                 ue_status_low_desc[i]);
2560                         }
2561                         for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2562                                 if (ue_hi & 1)
2563                                         dev_err(dev, "UE: %s bit set\n",
2564                                                 ue_status_hi_desc[i]);
2565                         }
2566                 }
2567         }
2568         if (error_detected)
2569                 netif_carrier_off(netdev);
2570 }
2571
2572 static void be_msix_disable(struct be_adapter *adapter)
2573 {
2574         if (msix_enabled(adapter)) {
2575                 pci_disable_msix(adapter->pdev);
2576                 adapter->num_msix_vec = 0;
2577                 adapter->num_msix_roce_vec = 0;
2578         }
2579 }
2580
2581 static int be_msix_enable(struct be_adapter *adapter)
2582 {
2583         int i, num_vec;
2584         struct device *dev = &adapter->pdev->dev;
2585
2586         /* If RoCE is supported, program the max number of NIC vectors that
2587          * may be configured via set-channels, along with vectors needed for
2588          * RoCe. Else, just program the number we'll use initially.
2589          */
2590         if (be_roce_supported(adapter))
2591                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2592                                 2 * num_online_cpus());
2593         else
2594                 num_vec = adapter->cfg_num_qs;
2595
2596         for (i = 0; i < num_vec; i++)
2597                 adapter->msix_entries[i].entry = i;
2598
2599         num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2600                                         MIN_MSIX_VECTORS, num_vec);
2601         if (num_vec < 0)
2602                 goto fail;
2603
2604         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2605                 adapter->num_msix_roce_vec = num_vec / 2;
2606                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2607                          adapter->num_msix_roce_vec);
2608         }
2609
2610         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2611
2612         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2613                  adapter->num_msix_vec);
2614         return 0;
2615
2616 fail:
2617         dev_warn(dev, "MSIx enable failed\n");
2618
2619         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2620         if (!be_physfn(adapter))
2621                 return num_vec;
2622         return 0;
2623 }
2624
2625 static inline int be_msix_vec_get(struct be_adapter *adapter,
2626                                 struct be_eq_obj *eqo)
2627 {
2628         return adapter->msix_entries[eqo->msix_idx].vector;
2629 }
2630
2631 static int be_msix_register(struct be_adapter *adapter)
2632 {
2633         struct net_device *netdev = adapter->netdev;
2634         struct be_eq_obj *eqo;
2635         int status, i, vec;
2636
2637         for_all_evt_queues(adapter, eqo, i) {
2638                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2639                 vec = be_msix_vec_get(adapter, eqo);
2640                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2641                 if (status)
2642                         goto err_msix;
2643         }
2644
2645         return 0;
2646 err_msix:
2647         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2648                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2649         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2650                 status);
2651         be_msix_disable(adapter);
2652         return status;
2653 }
2654
2655 static int be_irq_register(struct be_adapter *adapter)
2656 {
2657         struct net_device *netdev = adapter->netdev;
2658         int status;
2659
2660         if (msix_enabled(adapter)) {
2661                 status = be_msix_register(adapter);
2662                 if (status == 0)
2663                         goto done;
2664                 /* INTx is not supported for VF */
2665                 if (!be_physfn(adapter))
2666                         return status;
2667         }
2668
2669         /* INTx: only the first EQ is used */
2670         netdev->irq = adapter->pdev->irq;
2671         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2672                              &adapter->eq_obj[0]);
2673         if (status) {
2674                 dev_err(&adapter->pdev->dev,
2675                         "INTx request IRQ failed - err %d\n", status);
2676                 return status;
2677         }
2678 done:
2679         adapter->isr_registered = true;
2680         return 0;
2681 }
2682
2683 static void be_irq_unregister(struct be_adapter *adapter)
2684 {
2685         struct net_device *netdev = adapter->netdev;
2686         struct be_eq_obj *eqo;
2687         int i;
2688
2689         if (!adapter->isr_registered)
2690                 return;
2691
2692         /* INTx */
2693         if (!msix_enabled(adapter)) {
2694                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2695                 goto done;
2696         }
2697
2698         /* MSIx */
2699         for_all_evt_queues(adapter, eqo, i)
2700                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2701
2702 done:
2703         adapter->isr_registered = false;
2704 }
2705
2706 static void be_rx_qs_destroy(struct be_adapter *adapter)
2707 {
2708         struct be_queue_info *q;
2709         struct be_rx_obj *rxo;
2710         int i;
2711
2712         for_all_rx_queues(adapter, rxo, i) {
2713                 q = &rxo->q;
2714                 if (q->created) {
2715                         be_cmd_rxq_destroy(adapter, q);
2716                         be_rx_cq_clean(rxo);
2717                 }
2718                 be_queue_free(adapter, q);
2719         }
2720 }
2721
2722 static int be_close(struct net_device *netdev)
2723 {
2724         struct be_adapter *adapter = netdev_priv(netdev);
2725         struct be_eq_obj *eqo;
2726         int i;
2727
2728         be_roce_dev_close(adapter);
2729
2730         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2731                 for_all_evt_queues(adapter, eqo, i) {
2732                         napi_disable(&eqo->napi);
2733                         be_disable_busy_poll(eqo);
2734                 }
2735                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2736         }
2737
2738         be_async_mcc_disable(adapter);
2739
2740         /* Wait for all pending tx completions to arrive so that
2741          * all tx skbs are freed.
2742          */
2743         netif_tx_disable(netdev);
2744         be_tx_compl_clean(adapter);
2745
2746         be_rx_qs_destroy(adapter);
2747
2748         for (i = 1; i < (adapter->uc_macs + 1); i++)
2749                 be_cmd_pmac_del(adapter, adapter->if_handle,
2750                                 adapter->pmac_id[i], 0);
2751         adapter->uc_macs = 0;
2752
2753         for_all_evt_queues(adapter, eqo, i) {
2754                 if (msix_enabled(adapter))
2755                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2756                 else
2757                         synchronize_irq(netdev->irq);
2758                 be_eq_clean(eqo);
2759         }
2760
2761         be_irq_unregister(adapter);
2762
2763         return 0;
2764 }
2765
2766 static int be_rx_qs_create(struct be_adapter *adapter)
2767 {
2768         struct be_rx_obj *rxo;
2769         int rc, i, j;
2770         u8 rsstable[128];
2771
2772         for_all_rx_queues(adapter, rxo, i) {
2773                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2774                                     sizeof(struct be_eth_rx_d));
2775                 if (rc)
2776                         return rc;
2777         }
2778
2779         /* The FW would like the default RXQ to be created first */
2780         rxo = default_rxo(adapter);
2781         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2782                                adapter->if_handle, false, &rxo->rss_id);
2783         if (rc)
2784                 return rc;
2785
2786         for_all_rss_queues(adapter, rxo, i) {
2787                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2788                                        rx_frag_size, adapter->if_handle,
2789                                        true, &rxo->rss_id);
2790                 if (rc)
2791                         return rc;
2792         }
2793
2794         if (be_multi_rxq(adapter)) {
2795                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2796                         for_all_rss_queues(adapter, rxo, i) {
2797                                 if ((j + i) >= 128)
2798                                         break;
2799                                 rsstable[j + i] = rxo->rss_id;
2800                         }
2801                 }
2802                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2803                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2804
2805                 if (!BEx_chip(adapter))
2806                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2807                                                 RSS_ENABLE_UDP_IPV6;
2808         } else {
2809                 /* Disable RSS, if only default RX Q is created */
2810                 adapter->rss_flags = RSS_ENABLE_NONE;
2811         }
2812
2813         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2814                                128);
2815         if (rc) {
2816                 adapter->rss_flags = RSS_ENABLE_NONE;
2817                 return rc;
2818         }
2819
2820         /* First time posting */
2821         for_all_rx_queues(adapter, rxo, i)
2822                 be_post_rx_frags(rxo, GFP_KERNEL);
2823         return 0;
2824 }
2825
2826 static int be_open(struct net_device *netdev)
2827 {
2828         struct be_adapter *adapter = netdev_priv(netdev);
2829         struct be_eq_obj *eqo;
2830         struct be_rx_obj *rxo;
2831         struct be_tx_obj *txo;
2832         u8 link_status;
2833         int status, i;
2834
2835         status = be_rx_qs_create(adapter);
2836         if (status)
2837                 goto err;
2838
2839         status = be_irq_register(adapter);
2840         if (status)
2841                 goto err;
2842
2843         for_all_rx_queues(adapter, rxo, i)
2844                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2845
2846         for_all_tx_queues(adapter, txo, i)
2847                 be_cq_notify(adapter, txo->cq.id, true, 0);
2848
2849         be_async_mcc_enable(adapter);
2850
2851         for_all_evt_queues(adapter, eqo, i) {
2852                 napi_enable(&eqo->napi);
2853                 be_enable_busy_poll(eqo);
2854                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2855         }
2856         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2857
2858         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2859         if (!status)
2860                 be_link_status_update(adapter, link_status);
2861
2862         netif_tx_start_all_queues(netdev);
2863         be_roce_dev_open(adapter);
2864
2865 #ifdef CONFIG_BE2NET_VXLAN
2866         if (skyhawk_chip(adapter))
2867                 vxlan_get_rx_port(netdev);
2868 #endif
2869
2870         return 0;
2871 err:
2872         be_close(adapter->netdev);
2873         return -EIO;
2874 }
2875
2876 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2877 {
2878         struct be_dma_mem cmd;
2879         int status = 0;
2880         u8 mac[ETH_ALEN];
2881
2882         memset(mac, 0, ETH_ALEN);
2883
2884         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2885         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2886                                      GFP_KERNEL);
2887         if (cmd.va == NULL)
2888                 return -1;
2889
2890         if (enable) {
2891                 status = pci_write_config_dword(adapter->pdev,
2892                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2893                 if (status) {
2894                         dev_err(&adapter->pdev->dev,
2895                                 "Could not enable Wake-on-lan\n");
2896                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2897                                           cmd.dma);
2898                         return status;
2899                 }
2900                 status = be_cmd_enable_magic_wol(adapter,
2901                                 adapter->netdev->dev_addr, &cmd);
2902                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2903                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2904         } else {
2905                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2906                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2907                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2908         }
2909
2910         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2911         return status;
2912 }
2913
2914 /*
2915  * Generate a seed MAC address from the PF MAC Address using jhash.
2916  * MAC Address for VFs are assigned incrementally starting from the seed.
2917  * These addresses are programmed in the ASIC by the PF and the VF driver
2918  * queries for the MAC address during its probe.
2919  */
2920 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2921 {
2922         u32 vf;
2923         int status = 0;
2924         u8 mac[ETH_ALEN];
2925         struct be_vf_cfg *vf_cfg;
2926
2927         be_vf_eth_addr_generate(adapter, mac);
2928
2929         for_all_vfs(adapter, vf_cfg, vf) {
2930                 if (BEx_chip(adapter))
2931                         status = be_cmd_pmac_add(adapter, mac,
2932                                                  vf_cfg->if_handle,
2933                                                  &vf_cfg->pmac_id, vf + 1);
2934                 else
2935                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2936                                                 vf + 1);
2937
2938                 if (status)
2939                         dev_err(&adapter->pdev->dev,
2940                         "Mac address assignment failed for VF %d\n", vf);
2941                 else
2942                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2943
2944                 mac[5] += 1;
2945         }
2946         return status;
2947 }
2948
2949 static int be_vfs_mac_query(struct be_adapter *adapter)
2950 {
2951         int status, vf;
2952         u8 mac[ETH_ALEN];
2953         struct be_vf_cfg *vf_cfg;
2954
2955         for_all_vfs(adapter, vf_cfg, vf) {
2956                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2957                                                mac, vf_cfg->if_handle,
2958                                                false, vf+1);
2959                 if (status)
2960                         return status;
2961                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2962         }
2963         return 0;
2964 }
2965
2966 static void be_vf_clear(struct be_adapter *adapter)
2967 {
2968         struct be_vf_cfg *vf_cfg;
2969         u32 vf;
2970
2971         if (pci_vfs_assigned(adapter->pdev)) {
2972                 dev_warn(&adapter->pdev->dev,
2973                          "VFs are assigned to VMs: not disabling VFs\n");
2974                 goto done;
2975         }
2976
2977         pci_disable_sriov(adapter->pdev);
2978
2979         for_all_vfs(adapter, vf_cfg, vf) {
2980                 if (BEx_chip(adapter))
2981                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2982                                         vf_cfg->pmac_id, vf + 1);
2983                 else
2984                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2985                                        vf + 1);
2986
2987                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2988         }
2989 done:
2990         kfree(adapter->vf_cfg);
2991         adapter->num_vfs = 0;
2992 }
2993
2994 static void be_clear_queues(struct be_adapter *adapter)
2995 {
2996         be_mcc_queues_destroy(adapter);
2997         be_rx_cqs_destroy(adapter);
2998         be_tx_queues_destroy(adapter);
2999         be_evt_queues_destroy(adapter);
3000 }
3001
3002 static void be_cancel_worker(struct be_adapter *adapter)
3003 {
3004         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3005                 cancel_delayed_work_sync(&adapter->work);
3006                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3007         }
3008 }
3009
3010 static void be_mac_clear(struct be_adapter *adapter)
3011 {
3012         int i;
3013
3014         if (adapter->pmac_id) {
3015                 for (i = 0; i < (adapter->uc_macs + 1); i++)
3016                         be_cmd_pmac_del(adapter, adapter->if_handle,
3017                                         adapter->pmac_id[i], 0);
3018                 adapter->uc_macs = 0;
3019
3020                 kfree(adapter->pmac_id);
3021                 adapter->pmac_id = NULL;
3022         }
3023 }
3024
3025 #ifdef CONFIG_BE2NET_VXLAN
3026 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3027 {
3028         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3029                 be_cmd_manage_iface(adapter, adapter->if_handle,
3030                                     OP_CONVERT_TUNNEL_TO_NORMAL);
3031
3032         if (adapter->vxlan_port)
3033                 be_cmd_set_vxlan_port(adapter, 0);
3034
3035         adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3036         adapter->vxlan_port = 0;
3037 }
3038 #endif
3039
3040 static int be_clear(struct be_adapter *adapter)
3041 {
3042         be_cancel_worker(adapter);
3043
3044         if (sriov_enabled(adapter))
3045                 be_vf_clear(adapter);
3046
3047 #ifdef CONFIG_BE2NET_VXLAN
3048         be_disable_vxlan_offloads(adapter);
3049 #endif
3050         /* delete the primary mac along with the uc-mac list */
3051         be_mac_clear(adapter);
3052
3053         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
3054
3055         be_clear_queues(adapter);
3056
3057         be_msix_disable(adapter);
3058         return 0;
3059 }
3060
3061 static int be_vfs_if_create(struct be_adapter *adapter)
3062 {
3063         struct be_resources res = {0};
3064         struct be_vf_cfg *vf_cfg;
3065         u32 cap_flags, en_flags, vf;
3066         int status = 0;
3067
3068         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3069                     BE_IF_FLAGS_MULTICAST;
3070
3071         for_all_vfs(adapter, vf_cfg, vf) {
3072                 if (!BE3_chip(adapter)) {
3073                         status = be_cmd_get_profile_config(adapter, &res,
3074                                                            vf + 1);
3075                         if (!status)
3076                                 cap_flags = res.if_cap_flags;
3077                 }
3078
3079                 /* If a FW profile exists, then cap_flags are updated */
3080                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3081                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3082                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3083                                           &vf_cfg->if_handle, vf + 1);
3084                 if (status)
3085                         goto err;
3086         }
3087 err:
3088         return status;
3089 }
3090
3091 static int be_vf_setup_init(struct be_adapter *adapter)
3092 {
3093         struct be_vf_cfg *vf_cfg;
3094         int vf;
3095
3096         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3097                                   GFP_KERNEL);
3098         if (!adapter->vf_cfg)
3099                 return -ENOMEM;
3100
3101         for_all_vfs(adapter, vf_cfg, vf) {
3102                 vf_cfg->if_handle = -1;
3103                 vf_cfg->pmac_id = -1;
3104         }
3105         return 0;
3106 }
3107
3108 static int be_vf_setup(struct be_adapter *adapter)
3109 {
3110         struct device *dev = &adapter->pdev->dev;
3111         struct be_vf_cfg *vf_cfg;
3112         int status, old_vfs, vf;
3113         u32 privileges;
3114         u16 lnk_speed;
3115
3116         old_vfs = pci_num_vf(adapter->pdev);
3117         if (old_vfs) {
3118                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3119                 if (old_vfs != num_vfs)
3120                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3121                 adapter->num_vfs = old_vfs;
3122         } else {
3123                 if (num_vfs > be_max_vfs(adapter))
3124                         dev_info(dev, "Device supports %d VFs and not %d\n",
3125                                  be_max_vfs(adapter), num_vfs);
3126                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3127                 if (!adapter->num_vfs)
3128                         return 0;
3129         }
3130
3131         status = be_vf_setup_init(adapter);
3132         if (status)
3133                 goto err;
3134
3135         if (old_vfs) {
3136                 for_all_vfs(adapter, vf_cfg, vf) {
3137                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3138                         if (status)
3139                                 goto err;
3140                 }
3141         } else {
3142                 status = be_vfs_if_create(adapter);
3143                 if (status)
3144                         goto err;
3145         }
3146
3147         if (old_vfs) {
3148                 status = be_vfs_mac_query(adapter);
3149                 if (status)
3150                         goto err;
3151         } else {
3152                 status = be_vf_eth_addr_config(adapter);
3153                 if (status)
3154                         goto err;
3155         }
3156
3157         for_all_vfs(adapter, vf_cfg, vf) {
3158                 /* Allow VFs to programs MAC/VLAN filters */
3159                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3160                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3161                         status = be_cmd_set_fn_privileges(adapter,
3162                                                           privileges |
3163                                                           BE_PRIV_FILTMGMT,
3164                                                           vf + 1);
3165                         if (!status)
3166                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3167                                          vf);
3168                 }
3169
3170                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3171                  * Allow full available bandwidth
3172                  */
3173                 if (BE3_chip(adapter) && !old_vfs)
3174                         be_cmd_config_qos(adapter, 1000, vf + 1);
3175
3176                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3177                                                   NULL, vf + 1);
3178                 if (!status)
3179                         vf_cfg->tx_rate = lnk_speed;
3180
3181                 if (!old_vfs) {
3182                         be_cmd_enable_vf(adapter, vf + 1);
3183                         be_cmd_set_logical_link_config(adapter,
3184                                                        IFLA_VF_LINK_STATE_AUTO,
3185                                                        vf+1);
3186                 }
3187         }
3188
3189         if (!old_vfs) {
3190                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3191                 if (status) {
3192                         dev_err(dev, "SRIOV enable failed\n");
3193                         adapter->num_vfs = 0;
3194                         goto err;
3195                 }
3196         }
3197         return 0;
3198 err:
3199         dev_err(dev, "VF setup failed\n");
3200         be_vf_clear(adapter);
3201         return status;
3202 }
3203
3204 /* Converting function_mode bits on BE3 to SH mc_type enums */
3205
3206 static u8 be_convert_mc_type(u32 function_mode)
3207 {
3208         if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3209                 return vNIC1;
3210         else if (function_mode & FLEX10_MODE)
3211                 return FLEX10;
3212         else if (function_mode & VNIC_MODE)
3213                 return vNIC2;
3214         else if (function_mode & UMC_ENABLED)
3215                 return UMC;
3216         else
3217                 return MC_NONE;
3218 }
3219
3220 /* On BE2/BE3 FW does not suggest the supported limits */
3221 static void BEx_get_resources(struct be_adapter *adapter,
3222                               struct be_resources *res)
3223 {
3224         struct pci_dev *pdev = adapter->pdev;
3225         bool use_sriov = false;
3226         int max_vfs = 0;
3227
3228         if (be_physfn(adapter) && BE3_chip(adapter)) {
3229                 be_cmd_get_profile_config(adapter, res, 0);
3230                 /* Some old versions of BE3 FW don't report max_vfs value */
3231                 if (res->max_vfs == 0) {
3232                         max_vfs = pci_sriov_get_totalvfs(pdev);
3233                         res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3234                 }
3235                 use_sriov = res->max_vfs && sriov_want(adapter);
3236         }
3237
3238         if (be_physfn(adapter))
3239                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3240         else
3241                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3242
3243         adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3244
3245         if (be_is_mc(adapter)) {
3246                 /* Assuming that there are 4 channels per port,
3247                  * when multi-channel is enabled
3248                  */
3249                 if (be_is_qnq_mode(adapter))
3250                         res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3251                 else
3252                         /* In a non-qnq multichannel mode, the pvid
3253                          * takes up one vlan entry
3254                          */
3255                         res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3256         } else {
3257                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3258         }
3259
3260         res->max_mcast_mac = BE_MAX_MC;
3261
3262         /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3263          * 2) Create multiple TX rings on a BE3-R multi-channel interface
3264          *    *only* if it is RSS-capable.
3265          */
3266         if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
3267             !be_physfn(adapter) || (be_is_mc(adapter) &&
3268             !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3269                 res->max_tx_qs = 1;
3270         else
3271                 res->max_tx_qs = BE3_MAX_TX_QS;
3272
3273         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3274             !use_sriov && be_physfn(adapter))
3275                 res->max_rss_qs = (adapter->be3_native) ?
3276                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3277         res->max_rx_qs = res->max_rss_qs + 1;
3278
3279         if (be_physfn(adapter))
3280                 res->max_evt_qs = (res->max_vfs > 0) ?
3281                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3282         else
3283                 res->max_evt_qs = 1;
3284
3285         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3286         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3287                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3288 }
3289
3290 static void be_setup_init(struct be_adapter *adapter)
3291 {
3292         adapter->vlan_prio_bmap = 0xff;
3293         adapter->phy.link_speed = -1;
3294         adapter->if_handle = -1;
3295         adapter->be3_native = false;
3296         adapter->promiscuous = false;
3297         if (be_physfn(adapter))
3298                 adapter->cmd_privileges = MAX_PRIVILEGES;
3299         else
3300                 adapter->cmd_privileges = MIN_PRIVILEGES;
3301 }
3302
3303 static int be_get_resources(struct be_adapter *adapter)
3304 {
3305         struct device *dev = &adapter->pdev->dev;
3306         struct be_resources res = {0};
3307         int status;
3308
3309         if (BEx_chip(adapter)) {
3310                 BEx_get_resources(adapter, &res);
3311                 adapter->res = res;
3312         }
3313
3314         /* For Lancer, SH etc read per-function resource limits from FW.
3315          * GET_FUNC_CONFIG returns per function guaranteed limits.
3316          * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3317          */
3318         if (!BEx_chip(adapter)) {
3319                 status = be_cmd_get_func_config(adapter, &res);
3320                 if (status)
3321                         return status;
3322
3323                 /* If RoCE may be enabled stash away half the EQs for RoCE */
3324                 if (be_roce_supported(adapter))
3325                         res.max_evt_qs /= 2;
3326                 adapter->res = res;
3327
3328                 if (be_physfn(adapter)) {
3329                         status = be_cmd_get_profile_config(adapter, &res, 0);
3330                         if (status)
3331                                 return status;
3332                         adapter->res.max_vfs = res.max_vfs;
3333                 }
3334
3335                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3336                          be_max_txqs(adapter), be_max_rxqs(adapter),
3337                          be_max_rss(adapter), be_max_eqs(adapter),
3338                          be_max_vfs(adapter));
3339                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3340                          be_max_uc(adapter), be_max_mc(adapter),
3341                          be_max_vlans(adapter));
3342         }
3343
3344         return 0;
3345 }
3346
3347 /* Routine to query per function resource limits */
3348 static int be_get_config(struct be_adapter *adapter)
3349 {
3350         u16 profile_id;
3351         int status;
3352
3353         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3354                                      &adapter->function_mode,
3355                                      &adapter->function_caps,
3356                                      &adapter->asic_rev);
3357         if (status)
3358                 return status;
3359
3360          if (be_physfn(adapter)) {
3361                 status = be_cmd_get_active_profile(adapter, &profile_id);
3362                 if (!status)
3363                         dev_info(&adapter->pdev->dev,
3364                                  "Using profile 0x%x\n", profile_id);
3365         }
3366
3367         status = be_get_resources(adapter);
3368         if (status)
3369                 return status;
3370
3371         adapter->pmac_id = kcalloc(be_max_uc(adapter),
3372                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3373         if (!adapter->pmac_id)
3374                 return -ENOMEM;
3375
3376         /* Sanitize cfg_num_qs based on HW and platform limits */
3377         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3378
3379         return 0;
3380 }
3381
3382 static int be_mac_setup(struct be_adapter *adapter)
3383 {
3384         u8 mac[ETH_ALEN];
3385         int status;
3386
3387         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3388                 status = be_cmd_get_perm_mac(adapter, mac);
3389                 if (status)
3390                         return status;
3391
3392                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3393                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3394         } else {
3395                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3396                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3397         }
3398
3399         /* For BE3-R VFs, the PF programs the initial MAC address */
3400         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3401                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3402                                 &adapter->pmac_id[0], 0);
3403         return 0;
3404 }
3405
3406 static void be_schedule_worker(struct be_adapter *adapter)
3407 {
3408         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3409         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3410 }
3411
3412 static int be_setup_queues(struct be_adapter *adapter)
3413 {
3414         struct net_device *netdev = adapter->netdev;
3415         int status;
3416
3417         status = be_evt_queues_create(adapter);
3418         if (status)
3419                 goto err;
3420
3421         status = be_tx_qs_create(adapter);
3422         if (status)
3423                 goto err;
3424
3425         status = be_rx_cqs_create(adapter);
3426         if (status)
3427                 goto err;
3428
3429         status = be_mcc_queues_create(adapter);
3430         if (status)
3431                 goto err;
3432
3433         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3434         if (status)
3435                 goto err;
3436
3437         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3438         if (status)
3439                 goto err;
3440
3441         return 0;
3442 err:
3443         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3444         return status;
3445 }
3446
3447 int be_update_queues(struct be_adapter *adapter)
3448 {
3449         struct net_device *netdev = adapter->netdev;
3450         int status;
3451
3452         if (netif_running(netdev))
3453                 be_close(netdev);
3454
3455         be_cancel_worker(adapter);
3456
3457         /* If any vectors have been shared with RoCE we cannot re-program
3458          * the MSIx table.
3459          */
3460         if (!adapter->num_msix_roce_vec)
3461                 be_msix_disable(adapter);
3462
3463         be_clear_queues(adapter);
3464
3465         if (!msix_enabled(adapter)) {
3466                 status = be_msix_enable(adapter);
3467                 if (status)
3468                         return status;
3469         }
3470
3471         status = be_setup_queues(adapter);
3472         if (status)
3473                 return status;
3474
3475         be_schedule_worker(adapter);
3476
3477         if (netif_running(netdev))
3478                 status = be_open(netdev);
3479
3480         return status;
3481 }
3482
3483 static int be_setup(struct be_adapter *adapter)
3484 {
3485         struct device *dev = &adapter->pdev->dev;
3486         u32 tx_fc, rx_fc, en_flags;
3487         int status;
3488
3489         be_setup_init(adapter);
3490
3491         if (!lancer_chip(adapter))
3492                 be_cmd_req_native_mode(adapter);
3493
3494         status = be_get_config(adapter);
3495         if (status)
3496                 goto err;
3497
3498         status = be_msix_enable(adapter);
3499         if (status)
3500                 goto err;
3501
3502         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3503                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3504         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3505                 en_flags |= BE_IF_FLAGS_RSS;
3506         en_flags = en_flags & be_if_cap_flags(adapter);
3507         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3508                                   &adapter->if_handle, 0);
3509         if (status)
3510                 goto err;
3511
3512         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3513         rtnl_lock();
3514         status = be_setup_queues(adapter);
3515         rtnl_unlock();
3516         if (status)
3517                 goto err;
3518
3519         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3520
3521         status = be_mac_setup(adapter);
3522         if (status)
3523                 goto err;
3524
3525         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3526
3527         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3528                 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3529                         adapter->fw_ver);
3530                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3531         }
3532
3533         if (adapter->vlans_added)
3534                 be_vid_config(adapter);
3535
3536         be_set_rx_mode(adapter->netdev);
3537
3538         be_cmd_get_acpi_wol_cap(adapter);
3539
3540         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3541
3542         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3543                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3544                                         adapter->rx_fc);
3545
3546         if (be_physfn(adapter))
3547                 be_cmd_set_logical_link_config(adapter,
3548                                                IFLA_VF_LINK_STATE_AUTO, 0);
3549
3550         if (sriov_want(adapter)) {
3551                 if (be_max_vfs(adapter))
3552                         be_vf_setup(adapter);
3553                 else
3554                         dev_warn(dev, "device doesn't support SRIOV\n");
3555         }
3556
3557         status = be_cmd_get_phy_info(adapter);
3558         if (!status && be_pause_supported(adapter))
3559                 adapter->phy.fc_autoneg = 1;
3560
3561         be_schedule_worker(adapter);
3562         return 0;
3563 err:
3564         be_clear(adapter);
3565         return status;
3566 }
3567
3568 #ifdef CONFIG_NET_POLL_CONTROLLER
3569 static void be_netpoll(struct net_device *netdev)
3570 {
3571         struct be_adapter *adapter = netdev_priv(netdev);
3572         struct be_eq_obj *eqo;
3573         int i;
3574
3575         for_all_evt_queues(adapter, eqo, i) {
3576                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3577                 napi_schedule(&eqo->napi);
3578         }
3579
3580         return;
3581 }
3582 #endif
3583
3584 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3585 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3586
3587 static bool be_flash_redboot(struct be_adapter *adapter,
3588                         const u8 *p, u32 img_start, int image_size,
3589                         int hdr_size)
3590 {
3591         u32 crc_offset;
3592         u8 flashed_crc[4];
3593         int status;
3594
3595         crc_offset = hdr_size + img_start + image_size - 4;
3596
3597         p += crc_offset;
3598
3599         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3600                         (image_size - 4));
3601         if (status) {
3602                 dev_err(&adapter->pdev->dev,
3603                 "could not get crc from flash, not flashing redboot\n");
3604                 return false;
3605         }
3606
3607         /*update redboot only if crc does not match*/
3608         if (!memcmp(flashed_crc, p, 4))
3609                 return false;
3610         else
3611                 return true;
3612 }
3613
3614 static bool phy_flashing_required(struct be_adapter *adapter)
3615 {
3616         return (adapter->phy.phy_type == TN_8022 &&
3617                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3618 }
3619
3620 static bool is_comp_in_ufi(struct be_adapter *adapter,
3621                            struct flash_section_info *fsec, int type)
3622 {
3623         int i = 0, img_type = 0;
3624         struct flash_section_info_g2 *fsec_g2 = NULL;
3625
3626         if (BE2_chip(adapter))
3627                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3628
3629         for (i = 0; i < MAX_FLASH_COMP; i++) {
3630                 if (fsec_g2)
3631                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3632                 else
3633                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3634
3635                 if (img_type == type)
3636                         return true;
3637         }
3638         return false;
3639
3640 }
3641
3642 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3643                                          int header_size,
3644                                          const struct firmware *fw)
3645 {
3646         struct flash_section_info *fsec = NULL;
3647         const u8 *p = fw->data;
3648
3649         p += header_size;
3650         while (p < (fw->data + fw->size)) {
3651                 fsec = (struct flash_section_info *)p;
3652                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3653                         return fsec;
3654                 p += 32;
3655         }
3656         return NULL;
3657 }
3658
3659 static int be_flash(struct be_adapter *adapter, const u8 *img,
3660                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3661 {
3662         u32 total_bytes = 0, flash_op, num_bytes = 0;
3663         int status = 0;
3664         struct be_cmd_write_flashrom *req = flash_cmd->va;
3665
3666         total_bytes = img_size;
3667         while (total_bytes) {
3668                 num_bytes = min_t(u32, 32*1024, total_bytes);
3669
3670                 total_bytes -= num_bytes;
3671
3672                 if (!total_bytes) {
3673                         if (optype == OPTYPE_PHY_FW)
3674                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3675                         else
3676                                 flash_op = FLASHROM_OPER_FLASH;
3677                 } else {
3678                         if (optype == OPTYPE_PHY_FW)
3679                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3680                         else
3681                                 flash_op = FLASHROM_OPER_SAVE;
3682                 }
3683
3684                 memcpy(req->data_buf, img, num_bytes);
3685                 img += num_bytes;
3686                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3687                                                 flash_op, num_bytes);
3688                 if (status) {
3689                         if (status == ILLEGAL_IOCTL_REQ &&
3690                             optype == OPTYPE_PHY_FW)
3691                                 break;
3692                         dev_err(&adapter->pdev->dev,
3693                                 "cmd to write to flash rom failed.\n");
3694                         return status;
3695                 }
3696         }
3697         return 0;
3698 }
3699
3700 /* For BE2, BE3 and BE3-R */
3701 static int be_flash_BEx(struct be_adapter *adapter,
3702                          const struct firmware *fw,
3703                          struct be_dma_mem *flash_cmd,
3704                          int num_of_images)
3705
3706 {
3707         int status = 0, i, filehdr_size = 0;
3708         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3709         const u8 *p = fw->data;
3710         const struct flash_comp *pflashcomp;
3711         int num_comp, redboot;
3712         struct flash_section_info *fsec = NULL;
3713
3714         struct flash_comp gen3_flash_types[] = {
3715                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3716                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3717                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3718                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3719                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3720                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3721                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3722                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3723                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3724                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3725                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3726                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3727                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3728                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3729                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3730                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3731                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3732                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3733                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3734                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3735         };
3736
3737         struct flash_comp gen2_flash_types[] = {
3738                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3739                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3740                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3741                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3742                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3743                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3744                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3745                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3746                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3747                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3748                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3749                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3750                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3751                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3752                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3753                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3754         };
3755
3756         if (BE3_chip(adapter)) {
3757                 pflashcomp = gen3_flash_types;
3758                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3759                 num_comp = ARRAY_SIZE(gen3_flash_types);
3760         } else {
3761                 pflashcomp = gen2_flash_types;
3762                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3763                 num_comp = ARRAY_SIZE(gen2_flash_types);
3764         }
3765
3766         /* Get flash section info*/
3767         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3768         if (!fsec) {
3769                 dev_err(&adapter->pdev->dev,
3770                         "Invalid Cookie. UFI corrupted ?\n");
3771                 return -1;
3772         }
3773         for (i = 0; i < num_comp; i++) {
3774                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3775                         continue;
3776
3777                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3778                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3779                         continue;
3780
3781                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3782                     !phy_flashing_required(adapter))
3783                                 continue;
3784
3785                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3786                         redboot = be_flash_redboot(adapter, fw->data,
3787                                 pflashcomp[i].offset, pflashcomp[i].size,
3788                                 filehdr_size + img_hdrs_size);
3789                         if (!redboot)
3790                                 continue;
3791                 }
3792
3793                 p = fw->data;
3794                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3795                 if (p + pflashcomp[i].size > fw->data + fw->size)
3796                         return -1;
3797
3798                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3799                                         pflashcomp[i].size);
3800                 if (status) {
3801                         dev_err(&adapter->pdev->dev,
3802                                 "Flashing section type %d failed.\n",
3803                                 pflashcomp[i].img_type);
3804                         return status;
3805                 }
3806         }
3807         return 0;
3808 }
3809
3810 static int be_flash_skyhawk(struct be_adapter *adapter,
3811                 const struct firmware *fw,
3812                 struct be_dma_mem *flash_cmd, int num_of_images)
3813 {
3814         int status = 0, i, filehdr_size = 0;
3815         int img_offset, img_size, img_optype, redboot;
3816         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3817         const u8 *p = fw->data;
3818         struct flash_section_info *fsec = NULL;
3819
3820         filehdr_size = sizeof(struct flash_file_hdr_g3);
3821         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3822         if (!fsec) {
3823                 dev_err(&adapter->pdev->dev,
3824                         "Invalid Cookie. UFI corrupted ?\n");
3825                 return -1;
3826         }
3827
3828         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3829                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3830                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3831
3832                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3833                 case IMAGE_FIRMWARE_iSCSI:
3834                         img_optype = OPTYPE_ISCSI_ACTIVE;
3835                         break;
3836                 case IMAGE_BOOT_CODE:
3837                         img_optype = OPTYPE_REDBOOT;
3838                         break;
3839                 case IMAGE_OPTION_ROM_ISCSI:
3840                         img_optype = OPTYPE_BIOS;
3841                         break;
3842                 case IMAGE_OPTION_ROM_PXE:
3843                         img_optype = OPTYPE_PXE_BIOS;
3844                         break;
3845                 case IMAGE_OPTION_ROM_FCoE:
3846                         img_optype = OPTYPE_FCOE_BIOS;
3847                         break;
3848                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3849                         img_optype = OPTYPE_ISCSI_BACKUP;
3850                         break;
3851                 case IMAGE_NCSI:
3852                         img_optype = OPTYPE_NCSI_FW;
3853                         break;
3854                 default:
3855                         continue;
3856                 }
3857
3858                 if (img_optype == OPTYPE_REDBOOT) {
3859                         redboot = be_flash_redboot(adapter, fw->data,
3860                                         img_offset, img_size,
3861                                         filehdr_size + img_hdrs_size);
3862                         if (!redboot)
3863                                 continue;
3864                 }
3865
3866                 p = fw->data;
3867                 p += filehdr_size + img_offset + img_hdrs_size;
3868                 if (p + img_size > fw->data + fw->size)
3869                         return -1;
3870
3871                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3872                 if (status) {
3873                         dev_err(&adapter->pdev->dev,
3874                                 "Flashing section type %d failed.\n",
3875                                 fsec->fsec_entry[i].type);
3876                         return status;
3877                 }
3878         }
3879         return 0;
3880 }
3881
3882 static int lancer_fw_download(struct be_adapter *adapter,
3883                                 const struct firmware *fw)
3884 {
3885 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3886 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3887         struct be_dma_mem flash_cmd;
3888         const u8 *data_ptr = NULL;
3889         u8 *dest_image_ptr = NULL;
3890         size_t image_size = 0;
3891         u32 chunk_size = 0;
3892         u32 data_written = 0;
3893         u32 offset = 0;
3894         int status = 0;
3895         u8 add_status = 0;
3896         u8 change_status;
3897
3898         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3899                 dev_err(&adapter->pdev->dev,
3900                         "FW Image not properly aligned. "
3901                         "Length must be 4 byte aligned.\n");
3902                 status = -EINVAL;
3903                 goto lancer_fw_exit;
3904         }
3905
3906         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3907                                 + LANCER_FW_DOWNLOAD_CHUNK;
3908         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3909                                           &flash_cmd.dma, GFP_KERNEL);
3910         if (!flash_cmd.va) {
3911                 status = -ENOMEM;
3912                 goto lancer_fw_exit;
3913         }
3914
3915         dest_image_ptr = flash_cmd.va +
3916                                 sizeof(struct lancer_cmd_req_write_object);
3917         image_size = fw->size;
3918         data_ptr = fw->data;
3919
3920         while (image_size) {
3921                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3922
3923                 /* Copy the image chunk content. */
3924                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3925
3926                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3927                                                  chunk_size, offset,
3928                                                  LANCER_FW_DOWNLOAD_LOCATION,
3929                                                  &data_written, &change_status,
3930                                                  &add_status);
3931                 if (status)
3932                         break;
3933
3934                 offset += data_written;
3935                 data_ptr += data_written;
3936                 image_size -= data_written;
3937         }
3938
3939         if (!status) {
3940                 /* Commit the FW written */
3941                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3942                                                  0, offset,
3943                                                  LANCER_FW_DOWNLOAD_LOCATION,
3944                                                  &data_written, &change_status,
3945                                                  &add_status);
3946         }
3947
3948         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3949                                 flash_cmd.dma);
3950         if (status) {
3951                 dev_err(&adapter->pdev->dev,
3952                         "Firmware load error. "
3953                         "Status code: 0x%x Additional Status: 0x%x\n",
3954                         status, add_status);
3955                 goto lancer_fw_exit;
3956         }
3957
3958         if (change_status == LANCER_FW_RESET_NEEDED) {
3959                 dev_info(&adapter->pdev->dev,
3960                          "Resetting adapter to activate new FW\n");
3961                 status = lancer_physdev_ctrl(adapter,
3962                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3963                 if (status) {
3964                         dev_err(&adapter->pdev->dev,
3965                                 "Adapter busy for FW reset.\n"
3966                                 "New FW will not be active.\n");
3967                         goto lancer_fw_exit;
3968                 }
3969         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3970                         dev_err(&adapter->pdev->dev,
3971                                 "System reboot required for new FW"
3972                                 " to be active\n");
3973         }
3974
3975         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3976 lancer_fw_exit:
3977         return status;
3978 }
3979
3980 #define UFI_TYPE2               2
3981 #define UFI_TYPE3               3
3982 #define UFI_TYPE3R              10
3983 #define UFI_TYPE4               4
3984 static int be_get_ufi_type(struct be_adapter *adapter,
3985                            struct flash_file_hdr_g3 *fhdr)
3986 {
3987         if (fhdr == NULL)
3988                 goto be_get_ufi_exit;
3989
3990         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3991                 return UFI_TYPE4;
3992         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3993                 if (fhdr->asic_type_rev == 0x10)
3994                         return UFI_TYPE3R;
3995                 else
3996                         return UFI_TYPE3;
3997         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3998                 return UFI_TYPE2;
3999
4000 be_get_ufi_exit:
4001         dev_err(&adapter->pdev->dev,
4002                 "UFI and Interface are not compatible for flashing\n");
4003         return -1;
4004 }
4005
4006 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4007 {
4008         struct flash_file_hdr_g3 *fhdr3;
4009         struct image_hdr *img_hdr_ptr = NULL;
4010         struct be_dma_mem flash_cmd;
4011         const u8 *p;
4012         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4013
4014         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4015         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4016                                           &flash_cmd.dma, GFP_KERNEL);
4017         if (!flash_cmd.va) {
4018                 status = -ENOMEM;
4019                 goto be_fw_exit;
4020         }
4021
4022         p = fw->data;
4023         fhdr3 = (struct flash_file_hdr_g3 *)p;
4024
4025         ufi_type = be_get_ufi_type(adapter, fhdr3);
4026
4027         num_imgs = le32_to_cpu(fhdr3->num_imgs);
4028         for (i = 0; i < num_imgs; i++) {
4029                 img_hdr_ptr = (struct image_hdr *)(fw->data +
4030                                 (sizeof(struct flash_file_hdr_g3) +
4031                                  i * sizeof(struct image_hdr)));
4032                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
4033                         switch (ufi_type) {
4034                         case UFI_TYPE4:
4035                                 status = be_flash_skyhawk(adapter, fw,
4036                                                         &flash_cmd, num_imgs);
4037                                 break;
4038                         case UFI_TYPE3R:
4039                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
4040                                                       num_imgs);
4041                                 break;
4042                         case UFI_TYPE3:
4043                                 /* Do not flash this ufi on BE3-R cards */
4044                                 if (adapter->asic_rev < 0x10)
4045                                         status = be_flash_BEx(adapter, fw,
4046                                                               &flash_cmd,
4047                                                               num_imgs);
4048                                 else {
4049                                         status = -1;
4050                                         dev_err(&adapter->pdev->dev,
4051                                                 "Can't load BE3 UFI on BE3R\n");
4052                                 }
4053                         }
4054                 }
4055         }
4056
4057         if (ufi_type == UFI_TYPE2)
4058                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4059         else if (ufi_type == -1)
4060                 status = -1;
4061
4062         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4063                           flash_cmd.dma);
4064         if (status) {
4065                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4066                 goto be_fw_exit;
4067         }
4068
4069         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4070
4071 be_fw_exit:
4072         return status;
4073 }
4074
4075 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4076 {
4077         const struct firmware *fw;
4078         int status;
4079
4080         if (!netif_running(adapter->netdev)) {
4081                 dev_err(&adapter->pdev->dev,
4082                         "Firmware load not allowed (interface is down)\n");
4083                 return -1;
4084         }
4085
4086         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4087         if (status)
4088                 goto fw_exit;
4089
4090         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4091
4092         if (lancer_chip(adapter))
4093                 status = lancer_fw_download(adapter, fw);
4094         else
4095                 status = be_fw_download(adapter, fw);
4096
4097         if (!status)
4098                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4099                                   adapter->fw_on_flash);
4100
4101 fw_exit:
4102         release_firmware(fw);
4103         return status;
4104 }
4105
4106 static int be_ndo_bridge_setlink(struct net_device *dev,
4107                                     struct nlmsghdr *nlh)
4108 {
4109         struct be_adapter *adapter = netdev_priv(dev);
4110         struct nlattr *attr, *br_spec;
4111         int rem;
4112         int status = 0;
4113         u16 mode = 0;
4114
4115         if (!sriov_enabled(adapter))
4116                 return -EOPNOTSUPP;
4117
4118         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4119
4120         nla_for_each_nested(attr, br_spec, rem) {
4121                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4122                         continue;
4123
4124                 mode = nla_get_u16(attr);
4125                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4126                         return -EINVAL;
4127
4128                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4129                                                adapter->if_handle,
4130                                                mode == BRIDGE_MODE_VEPA ?
4131                                                PORT_FWD_TYPE_VEPA :
4132                                                PORT_FWD_TYPE_VEB);
4133                 if (status)
4134                         goto err;
4135
4136                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4137                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4138
4139                 return status;
4140         }
4141 err:
4142         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4143                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4144
4145         return status;
4146 }
4147
4148 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4149                                     struct net_device *dev,
4150                                     u32 filter_mask)
4151 {
4152         struct be_adapter *adapter = netdev_priv(dev);
4153         int status = 0;
4154         u8 hsw_mode;
4155
4156         if (!sriov_enabled(adapter))
4157                 return 0;
4158
4159         /* BE and Lancer chips support VEB mode only */
4160         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4161                 hsw_mode = PORT_FWD_TYPE_VEB;
4162         } else {
4163                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4164                                                adapter->if_handle, &hsw_mode);
4165                 if (status)
4166                         return 0;
4167         }
4168
4169         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4170                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4171                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4172 }
4173
4174 #ifdef CONFIG_BE2NET_VXLAN
4175 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4176                               __be16 port)
4177 {
4178         struct be_adapter *adapter = netdev_priv(netdev);
4179         struct device *dev = &adapter->pdev->dev;
4180         int status;
4181
4182         if (lancer_chip(adapter) || BEx_chip(adapter))
4183                 return;
4184
4185         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4186                 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4187                          be16_to_cpu(port));
4188                 dev_info(dev,
4189                          "Only one UDP port supported for VxLAN offloads\n");
4190                 return;
4191         }
4192
4193         status = be_cmd_manage_iface(adapter, adapter->if_handle,
4194                                      OP_CONVERT_NORMAL_TO_TUNNEL);
4195         if (status) {
4196                 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4197                 goto err;
4198         }
4199
4200         status = be_cmd_set_vxlan_port(adapter, port);
4201         if (status) {
4202                 dev_warn(dev, "Failed to add VxLAN port\n");
4203                 goto err;
4204         }
4205         adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4206         adapter->vxlan_port = port;
4207
4208         dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4209                  be16_to_cpu(port));
4210         return;
4211 err:
4212         be_disable_vxlan_offloads(adapter);
4213         return;
4214 }
4215
4216 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4217                               __be16 port)
4218 {
4219         struct be_adapter *adapter = netdev_priv(netdev);
4220
4221         if (lancer_chip(adapter) || BEx_chip(adapter))
4222                 return;
4223
4224         if (adapter->vxlan_port != port)
4225                 return;
4226
4227         be_disable_vxlan_offloads(adapter);
4228
4229         dev_info(&adapter->pdev->dev,
4230                  "Disabled VxLAN offloads for UDP port %d\n",
4231                  be16_to_cpu(port));
4232 }
4233 #endif
4234
4235 static const struct net_device_ops be_netdev_ops = {
4236         .ndo_open               = be_open,
4237         .ndo_stop               = be_close,
4238         .ndo_start_xmit         = be_xmit,
4239         .ndo_set_rx_mode        = be_set_rx_mode,
4240         .ndo_set_mac_address    = be_mac_addr_set,
4241         .ndo_change_mtu         = be_change_mtu,
4242         .ndo_get_stats64        = be_get_stats64,
4243         .ndo_validate_addr      = eth_validate_addr,
4244         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4245         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4246         .ndo_set_vf_mac         = be_set_vf_mac,
4247         .ndo_set_vf_vlan        = be_set_vf_vlan,
4248         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4249         .ndo_get_vf_config      = be_get_vf_config,
4250         .ndo_set_vf_link_state  = be_set_vf_link_state,
4251 #ifdef CONFIG_NET_POLL_CONTROLLER
4252         .ndo_poll_controller    = be_netpoll,
4253 #endif
4254         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4255         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4256 #ifdef CONFIG_NET_RX_BUSY_POLL
4257         .ndo_busy_poll          = be_busy_poll,
4258 #endif
4259 #ifdef CONFIG_BE2NET_VXLAN
4260         .ndo_add_vxlan_port     = be_add_vxlan_port,
4261         .ndo_del_vxlan_port     = be_del_vxlan_port,
4262 #endif
4263 };
4264
4265 static void be_netdev_init(struct net_device *netdev)
4266 {
4267         struct be_adapter *adapter = netdev_priv(netdev);
4268
4269         if (skyhawk_chip(adapter)) {
4270                 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4271                                            NETIF_F_TSO | NETIF_F_TSO6 |
4272                                            NETIF_F_GSO_UDP_TUNNEL;
4273                 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4274         }
4275         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4276                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4277                 NETIF_F_HW_VLAN_CTAG_TX;
4278         if (be_multi_rxq(adapter))
4279                 netdev->hw_features |= NETIF_F_RXHASH;
4280
4281         netdev->features |= netdev->hw_features |
4282                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4283
4284         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4285                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4286
4287         netdev->priv_flags |= IFF_UNICAST_FLT;
4288
4289         netdev->flags |= IFF_MULTICAST;
4290
4291         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4292
4293         netdev->netdev_ops = &be_netdev_ops;
4294
4295         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4296 }
4297
4298 static void be_unmap_pci_bars(struct be_adapter *adapter)
4299 {
4300         if (adapter->csr)
4301                 pci_iounmap(adapter->pdev, adapter->csr);
4302         if (adapter->db)
4303                 pci_iounmap(adapter->pdev, adapter->db);
4304 }
4305
4306 static int db_bar(struct be_adapter *adapter)
4307 {
4308         if (lancer_chip(adapter) || !be_physfn(adapter))
4309                 return 0;
4310         else
4311                 return 4;
4312 }
4313
4314 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4315 {
4316         if (skyhawk_chip(adapter)) {
4317                 adapter->roce_db.size = 4096;
4318                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4319                                                               db_bar(adapter));
4320                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4321                                                                db_bar(adapter));
4322         }
4323         return 0;
4324 }
4325
4326 static int be_map_pci_bars(struct be_adapter *adapter)
4327 {
4328         u8 __iomem *addr;
4329
4330         if (BEx_chip(adapter) && be_physfn(adapter)) {
4331                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4332                 if (adapter->csr == NULL)
4333                         return -ENOMEM;
4334         }
4335
4336         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4337         if (addr == NULL)
4338                 goto pci_map_err;
4339         adapter->db = addr;
4340
4341         be_roce_map_pci_bars(adapter);
4342         return 0;
4343
4344 pci_map_err:
4345         be_unmap_pci_bars(adapter);
4346         return -ENOMEM;
4347 }
4348
4349 static void be_ctrl_cleanup(struct be_adapter *adapter)
4350 {
4351         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4352
4353         be_unmap_pci_bars(adapter);
4354
4355         if (mem->va)
4356                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4357                                   mem->dma);
4358
4359         mem = &adapter->rx_filter;
4360         if (mem->va)
4361                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4362                                   mem->dma);
4363 }
4364
4365 static int be_ctrl_init(struct be_adapter *adapter)
4366 {
4367         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4368         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4369         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4370         u32 sli_intf;
4371         int status;
4372
4373         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4374         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4375                                  SLI_INTF_FAMILY_SHIFT;
4376         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4377
4378         status = be_map_pci_bars(adapter);
4379         if (status)
4380                 goto done;
4381
4382         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4383         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4384                                                 mbox_mem_alloc->size,
4385                                                 &mbox_mem_alloc->dma,
4386                                                 GFP_KERNEL);
4387         if (!mbox_mem_alloc->va) {
4388                 status = -ENOMEM;
4389                 goto unmap_pci_bars;
4390         }
4391         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4392         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4393         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4394         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4395
4396         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4397         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4398                                             rx_filter->size, &rx_filter->dma,
4399                                             GFP_KERNEL);
4400         if (rx_filter->va == NULL) {
4401                 status = -ENOMEM;
4402                 goto free_mbox;
4403         }
4404
4405         mutex_init(&adapter->mbox_lock);
4406         spin_lock_init(&adapter->mcc_lock);
4407         spin_lock_init(&adapter->mcc_cq_lock);
4408
4409         init_completion(&adapter->et_cmd_compl);
4410         pci_save_state(adapter->pdev);
4411         return 0;
4412
4413 free_mbox:
4414         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4415                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4416
4417 unmap_pci_bars:
4418         be_unmap_pci_bars(adapter);
4419
4420 done:
4421         return status;
4422 }
4423
4424 static void be_stats_cleanup(struct be_adapter *adapter)
4425 {
4426         struct be_dma_mem *cmd = &adapter->stats_cmd;
4427
4428         if (cmd->va)
4429                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4430                                   cmd->va, cmd->dma);
4431 }
4432
4433 static int be_stats_init(struct be_adapter *adapter)
4434 {
4435         struct be_dma_mem *cmd = &adapter->stats_cmd;
4436
4437         if (lancer_chip(adapter))
4438                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4439         else if (BE2_chip(adapter))
4440                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4441         else if (BE3_chip(adapter))
4442                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4443         else
4444                 /* ALL non-BE ASICs */
4445                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4446
4447         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4448                                       GFP_KERNEL);
4449         if (cmd->va == NULL)
4450                 return -1;
4451         return 0;
4452 }
4453
4454 static void be_remove(struct pci_dev *pdev)
4455 {
4456         struct be_adapter *adapter = pci_get_drvdata(pdev);
4457
4458         if (!adapter)
4459                 return;
4460
4461         be_roce_dev_remove(adapter);
4462         be_intr_set(adapter, false);
4463
4464         cancel_delayed_work_sync(&adapter->func_recovery_work);
4465
4466         unregister_netdev(adapter->netdev);
4467
4468         be_clear(adapter);
4469
4470         /* tell fw we're done with firing cmds */
4471         be_cmd_fw_clean(adapter);
4472
4473         be_stats_cleanup(adapter);
4474
4475         be_ctrl_cleanup(adapter);
4476
4477         pci_disable_pcie_error_reporting(pdev);
4478
4479         pci_release_regions(pdev);
4480         pci_disable_device(pdev);
4481
4482         free_netdev(adapter->netdev);
4483 }
4484
4485 static int be_get_initial_config(struct be_adapter *adapter)
4486 {
4487         int status, level;
4488
4489         status = be_cmd_get_cntl_attributes(adapter);
4490         if (status)
4491                 return status;
4492
4493         /* Must be a power of 2 or else MODULO will BUG_ON */
4494         adapter->be_get_temp_freq = 64;
4495
4496         if (BEx_chip(adapter)) {
4497                 level = be_cmd_get_fw_log_level(adapter);
4498                 adapter->msg_enable =
4499                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4500         }
4501
4502         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4503         return 0;
4504 }
4505
4506 static int lancer_recover_func(struct be_adapter *adapter)
4507 {
4508         struct device *dev = &adapter->pdev->dev;
4509         int status;
4510
4511         status = lancer_test_and_set_rdy_state(adapter);
4512         if (status)
4513                 goto err;
4514
4515         if (netif_running(adapter->netdev))
4516                 be_close(adapter->netdev);
4517
4518         be_clear(adapter);
4519
4520         be_clear_all_error(adapter);
4521
4522         status = be_setup(adapter);
4523         if (status)
4524                 goto err;
4525
4526         if (netif_running(adapter->netdev)) {
4527                 status = be_open(adapter->netdev);
4528                 if (status)
4529                         goto err;
4530         }
4531
4532         dev_err(dev, "Adapter recovery successful\n");
4533         return 0;
4534 err:
4535         if (status == -EAGAIN)
4536                 dev_err(dev, "Waiting for resource provisioning\n");
4537         else
4538                 dev_err(dev, "Adapter recovery failed\n");
4539
4540         return status;
4541 }
4542
4543 static void be_func_recovery_task(struct work_struct *work)
4544 {
4545         struct be_adapter *adapter =
4546                 container_of(work, struct be_adapter,  func_recovery_work.work);
4547         int status = 0;
4548
4549         be_detect_error(adapter);
4550
4551         if (adapter->hw_error && lancer_chip(adapter)) {
4552
4553                 rtnl_lock();
4554                 netif_device_detach(adapter->netdev);
4555                 rtnl_unlock();
4556
4557                 status = lancer_recover_func(adapter);
4558                 if (!status)
4559                         netif_device_attach(adapter->netdev);
4560         }
4561
4562         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4563          * no need to attempt further recovery.
4564          */
4565         if (!status || status == -EAGAIN)
4566                 schedule_delayed_work(&adapter->func_recovery_work,
4567                                       msecs_to_jiffies(1000));
4568 }
4569
4570 static void be_worker(struct work_struct *work)
4571 {
4572         struct be_adapter *adapter =
4573                 container_of(work, struct be_adapter, work.work);
4574         struct be_rx_obj *rxo;
4575         int i;
4576
4577         /* when interrupts are not yet enabled, just reap any pending
4578         * mcc completions */
4579         if (!netif_running(adapter->netdev)) {
4580                 local_bh_disable();
4581                 be_process_mcc(adapter);
4582                 local_bh_enable();
4583                 goto reschedule;
4584         }
4585
4586         if (!adapter->stats_cmd_sent) {
4587                 if (lancer_chip(adapter))
4588                         lancer_cmd_get_pport_stats(adapter,
4589                                                 &adapter->stats_cmd);
4590                 else
4591                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4592         }
4593
4594         if (be_physfn(adapter) &&
4595             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4596                 be_cmd_get_die_temperature(adapter);
4597
4598         for_all_rx_queues(adapter, rxo, i) {
4599                 /* Replenish RX-queues starved due to memory
4600                  * allocation failures.
4601                  */
4602                 if (rxo->rx_post_starved)
4603                         be_post_rx_frags(rxo, GFP_KERNEL);
4604         }
4605
4606         be_eqd_update(adapter);
4607
4608 reschedule:
4609         adapter->work_counter++;
4610         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4611 }
4612
4613 /* If any VFs are already enabled don't FLR the PF */
4614 static bool be_reset_required(struct be_adapter *adapter)
4615 {
4616         return pci_num_vf(adapter->pdev) ? false : true;
4617 }
4618
4619 static char *mc_name(struct be_adapter *adapter)
4620 {
4621         char *str = ""; /* default */
4622
4623         switch (adapter->mc_type) {
4624         case UMC:
4625                 str = "UMC";
4626                 break;
4627         case FLEX10:
4628                 str = "FLEX10";
4629                 break;
4630         case vNIC1:
4631                 str = "vNIC-1";
4632                 break;
4633         case nPAR:
4634                 str = "nPAR";
4635                 break;
4636         case UFP:
4637                 str = "UFP";
4638                 break;
4639         case vNIC2:
4640                 str = "vNIC-2";
4641                 break;
4642         default:
4643                 str = "";
4644         }
4645
4646         return str;
4647 }
4648
4649 static inline char *func_name(struct be_adapter *adapter)
4650 {
4651         return be_physfn(adapter) ? "PF" : "VF";
4652 }
4653
4654 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4655 {
4656         int status = 0;
4657         struct be_adapter *adapter;
4658         struct net_device *netdev;
4659         char port_name;
4660
4661         status = pci_enable_device(pdev);
4662         if (status)
4663                 goto do_none;
4664
4665         status = pci_request_regions(pdev, DRV_NAME);
4666         if (status)
4667                 goto disable_dev;
4668         pci_set_master(pdev);
4669
4670         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4671         if (netdev == NULL) {
4672                 status = -ENOMEM;
4673                 goto rel_reg;
4674         }
4675         adapter = netdev_priv(netdev);
4676         adapter->pdev = pdev;
4677         pci_set_drvdata(pdev, adapter);
4678         adapter->netdev = netdev;
4679         SET_NETDEV_DEV(netdev, &pdev->dev);
4680
4681         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4682         if (!status) {
4683                 netdev->features |= NETIF_F_HIGHDMA;
4684         } else {
4685                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4686                 if (status) {
4687                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4688                         goto free_netdev;
4689                 }
4690         }
4691
4692         if (be_physfn(adapter)) {
4693                 status = pci_enable_pcie_error_reporting(pdev);
4694                 if (!status)
4695                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4696         }
4697
4698         status = be_ctrl_init(adapter);
4699         if (status)
4700                 goto free_netdev;
4701
4702         /* sync up with fw's ready state */
4703         if (be_physfn(adapter)) {
4704                 status = be_fw_wait_ready(adapter);
4705                 if (status)
4706                         goto ctrl_clean;
4707         }
4708
4709         if (be_reset_required(adapter)) {
4710                 status = be_cmd_reset_function(adapter);
4711                 if (status)
4712                         goto ctrl_clean;
4713
4714                 /* Wait for interrupts to quiesce after an FLR */
4715                 msleep(100);
4716         }
4717
4718         /* Allow interrupts for other ULPs running on NIC function */
4719         be_intr_set(adapter, true);
4720
4721         /* tell fw we're ready to fire cmds */
4722         status = be_cmd_fw_init(adapter);
4723         if (status)
4724                 goto ctrl_clean;
4725
4726         status = be_stats_init(adapter);
4727         if (status)
4728                 goto ctrl_clean;
4729
4730         status = be_get_initial_config(adapter);
4731         if (status)
4732                 goto stats_clean;
4733
4734         INIT_DELAYED_WORK(&adapter->work, be_worker);
4735         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4736         adapter->rx_fc = adapter->tx_fc = true;
4737
4738         status = be_setup(adapter);
4739         if (status)
4740                 goto stats_clean;
4741
4742         be_netdev_init(netdev);
4743         status = register_netdev(netdev);
4744         if (status != 0)
4745                 goto unsetup;
4746
4747         be_roce_dev_add(adapter);
4748
4749         schedule_delayed_work(&adapter->func_recovery_work,
4750                               msecs_to_jiffies(1000));
4751
4752         be_cmd_query_port_name(adapter, &port_name);
4753
4754         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4755                  func_name(adapter), mc_name(adapter), port_name);
4756
4757         return 0;
4758
4759 unsetup:
4760         be_clear(adapter);
4761 stats_clean:
4762         be_stats_cleanup(adapter);
4763 ctrl_clean:
4764         be_ctrl_cleanup(adapter);
4765 free_netdev:
4766         free_netdev(netdev);
4767 rel_reg:
4768         pci_release_regions(pdev);
4769 disable_dev:
4770         pci_disable_device(pdev);
4771 do_none:
4772         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4773         return status;
4774 }
4775
4776 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4777 {
4778         struct be_adapter *adapter = pci_get_drvdata(pdev);
4779         struct net_device *netdev =  adapter->netdev;
4780
4781         if (adapter->wol_en)
4782                 be_setup_wol(adapter, true);
4783
4784         be_intr_set(adapter, false);
4785         cancel_delayed_work_sync(&adapter->func_recovery_work);
4786
4787         netif_device_detach(netdev);
4788         if (netif_running(netdev)) {
4789                 rtnl_lock();
4790                 be_close(netdev);
4791                 rtnl_unlock();
4792         }
4793         be_clear(adapter);
4794
4795         pci_save_state(pdev);
4796         pci_disable_device(pdev);
4797         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4798         return 0;
4799 }
4800
4801 static int be_resume(struct pci_dev *pdev)
4802 {
4803         int status = 0;
4804         struct be_adapter *adapter = pci_get_drvdata(pdev);
4805         struct net_device *netdev =  adapter->netdev;
4806
4807         netif_device_detach(netdev);
4808
4809         status = pci_enable_device(pdev);
4810         if (status)
4811                 return status;
4812
4813         pci_set_power_state(pdev, PCI_D0);
4814         pci_restore_state(pdev);
4815
4816         status = be_fw_wait_ready(adapter);
4817         if (status)
4818                 return status;
4819
4820         be_intr_set(adapter, true);
4821         /* tell fw we're ready to fire cmds */
4822         status = be_cmd_fw_init(adapter);
4823         if (status)
4824                 return status;
4825
4826         be_setup(adapter);
4827         if (netif_running(netdev)) {
4828                 rtnl_lock();
4829                 be_open(netdev);
4830                 rtnl_unlock();
4831         }
4832
4833         schedule_delayed_work(&adapter->func_recovery_work,
4834                               msecs_to_jiffies(1000));
4835         netif_device_attach(netdev);
4836
4837         if (adapter->wol_en)
4838                 be_setup_wol(adapter, false);
4839
4840         return 0;
4841 }
4842
4843 /*
4844  * An FLR will stop BE from DMAing any data.
4845  */
4846 static void be_shutdown(struct pci_dev *pdev)
4847 {
4848         struct be_adapter *adapter = pci_get_drvdata(pdev);
4849
4850         if (!adapter)
4851                 return;
4852
4853         cancel_delayed_work_sync(&adapter->work);
4854         cancel_delayed_work_sync(&adapter->func_recovery_work);
4855
4856         netif_device_detach(adapter->netdev);
4857
4858         be_cmd_reset_function(adapter);
4859
4860         pci_disable_device(pdev);
4861 }
4862
4863 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4864                                 pci_channel_state_t state)
4865 {
4866         struct be_adapter *adapter = pci_get_drvdata(pdev);
4867         struct net_device *netdev =  adapter->netdev;
4868
4869         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4870
4871         if (!adapter->eeh_error) {
4872                 adapter->eeh_error = true;
4873
4874                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4875
4876                 rtnl_lock();
4877                 netif_device_detach(netdev);
4878                 if (netif_running(netdev))
4879                         be_close(netdev);
4880                 rtnl_unlock();
4881
4882                 be_clear(adapter);
4883         }
4884
4885         if (state == pci_channel_io_perm_failure)
4886                 return PCI_ERS_RESULT_DISCONNECT;
4887
4888         pci_disable_device(pdev);
4889
4890         /* The error could cause the FW to trigger a flash debug dump.
4891          * Resetting the card while flash dump is in progress
4892          * can cause it not to recover; wait for it to finish.
4893          * Wait only for first function as it is needed only once per
4894          * adapter.
4895          */
4896         if (pdev->devfn == 0)
4897                 ssleep(30);
4898
4899         return PCI_ERS_RESULT_NEED_RESET;
4900 }
4901
4902 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4903 {
4904         struct be_adapter *adapter = pci_get_drvdata(pdev);
4905         int status;
4906
4907         dev_info(&adapter->pdev->dev, "EEH reset\n");
4908
4909         status = pci_enable_device(pdev);
4910         if (status)
4911                 return PCI_ERS_RESULT_DISCONNECT;
4912
4913         pci_set_master(pdev);
4914         pci_set_power_state(pdev, PCI_D0);
4915         pci_restore_state(pdev);
4916
4917         /* Check if card is ok and fw is ready */
4918         dev_info(&adapter->pdev->dev,
4919                  "Waiting for FW to be ready after EEH reset\n");
4920         status = be_fw_wait_ready(adapter);
4921         if (status)
4922                 return PCI_ERS_RESULT_DISCONNECT;
4923
4924         pci_cleanup_aer_uncorrect_error_status(pdev);
4925         be_clear_all_error(adapter);
4926         return PCI_ERS_RESULT_RECOVERED;
4927 }
4928
4929 static void be_eeh_resume(struct pci_dev *pdev)
4930 {
4931         int status = 0;
4932         struct be_adapter *adapter = pci_get_drvdata(pdev);
4933         struct net_device *netdev =  adapter->netdev;
4934
4935         dev_info(&adapter->pdev->dev, "EEH resume\n");
4936
4937         pci_save_state(pdev);
4938
4939         status = be_cmd_reset_function(adapter);
4940         if (status)
4941                 goto err;
4942
4943         /* tell fw we're ready to fire cmds */
4944         status = be_cmd_fw_init(adapter);
4945         if (status)
4946                 goto err;
4947
4948         status = be_setup(adapter);
4949         if (status)
4950                 goto err;
4951
4952         if (netif_running(netdev)) {
4953                 status = be_open(netdev);
4954                 if (status)
4955                         goto err;
4956         }
4957
4958         schedule_delayed_work(&adapter->func_recovery_work,
4959                               msecs_to_jiffies(1000));
4960         netif_device_attach(netdev);
4961         return;
4962 err:
4963         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4964 }
4965
4966 static const struct pci_error_handlers be_eeh_handlers = {
4967         .error_detected = be_eeh_err_detected,
4968         .slot_reset = be_eeh_reset,
4969         .resume = be_eeh_resume,
4970 };
4971
4972 static struct pci_driver be_driver = {
4973         .name = DRV_NAME,
4974         .id_table = be_dev_ids,
4975         .probe = be_probe,
4976         .remove = be_remove,
4977         .suspend = be_suspend,
4978         .resume = be_resume,
4979         .shutdown = be_shutdown,
4980         .err_handler = &be_eeh_handlers
4981 };
4982
4983 static int __init be_init_module(void)
4984 {
4985         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4986             rx_frag_size != 2048) {
4987                 printk(KERN_WARNING DRV_NAME
4988                         " : Module param rx_frag_size must be 2048/4096/8192."
4989                         " Using 2048\n");
4990                 rx_frag_size = 2048;
4991         }
4992
4993         return pci_register_driver(&be_driver);
4994 }
4995 module_init(be_init_module);
4996
4997 static void __exit be_exit_module(void)
4998 {
4999         pci_unregister_driver(&be_driver);
5000 }
5001 module_exit(be_exit_module);