/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

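/* Free the DMA memory backing a queue, if any was allocated */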
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

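/* Allocate zeroed DMA-coherent memory for a queue of 'len' entries */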
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

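/* Enable/disable host interrupts by toggling the hostintr bit in the
 * PCI config-space membar control register.
 */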
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

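/* Enable/disable host interrupts; tries the FW cmd first and falls back
 * to the PCI config-space register if the cmd fails.
 */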
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

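/* Notify HW of the number of RX descriptors posted, via the RQ doorbell */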
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

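/* Notify HW of the number of TX WRBs posted, via the TX-ULP doorbell */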
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

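/* Notify HW of the EQ entries processed; optionally re-arm the EQ and
 * clear the interrupt.
 */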
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

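/* Notify HW of the CQ entries processed; optionally re-arm the CQ */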
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

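/* ndo_set_mac_address handler: program the new MAC via PMAC_ADD and
 * confirm with the FW that it actually took effect before accepting it.
 */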
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

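/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit
 * software counter.
 */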
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* This erx HW counter wraps around after 65535; the
                 * driver accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* erx_v1 is a superset of erx_v0, so it's OK to use the v1
                 * layout for v0 access
                 */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per-interface; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

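/* Propagate a FW-reported link state change to the net device */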
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

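/* Update per-queue TX stats after posting a transmit */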
static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

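/* Fill a WRB with the DMA address and length of one TX fragment */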
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

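/* Return the VLAN tag to use for a TX skb; the priority is overridden
 * with the recommended one when the OS-given priority is unavailable.
 */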
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bitmap, use the recommended priority instead.
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

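/* Fill the header WRB with the checksum/LSO/VLAN attributes of the skb */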
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

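/* DMA-unmap a single TX fragment described by a WRB */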
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

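/* DMA-map the skb's fragments and fill the TX queue with the header WRB,
 * one WRB per fragment and an optional dummy WRB. Returns the number of
 * bytes queued, or 0 on a DMA mapping error (after unwinding).
 */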
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

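/* Insert the VLAN tag (and the outer QnQ tag, if any) into the packet
 * itself instead of letting the HW do it.
 */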
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* F/W workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

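/* Detect IPv6 packets carrying the extension-header pattern that can
 * trigger a TX stall on BE3 (see be_ipv6_tx_stall_chk()).
 */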
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* The Lancer and SH-R ASICs have a bug wherein packets that are
         * 32 bytes or less may cause a transmit stall on that port. The
         * work-around is to pad such packets (<= 32 bytes) to a 36-byte
         * length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though checksum offload is disabled.
         * Manually insert the VLAN tag in the pkt instead.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

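/* ndo_start_xmit handler */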
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

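/* ndo_change_mtu handler: validate the new MTU against chip limits */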
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

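/* ndo_set_rx_mode handler: sync the promisc/allmulti flags and the UC/MC
 * address filters with the HW, falling back to the promiscuous modes when
 * the HW filter tables are exhausted.
 */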
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

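/* ndo_set_vf_vlan handler: configure (or reset) transparent VLAN tagging
 * for a VF via the hardware switch config.
 */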
static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle, 0);
                }
        } else {
                /* Reset Transparent VLAN Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

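/* Adaptive interrupt coalescing: recompute the EQ delay once a second
 * from the observed RX packet rate and program it if it changed.
 */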
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non-TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts.
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

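/* Return the page_info for an RX frag; the page is DMA-unmapped on its
 * last use.
 */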
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

1396 /*
1397  * skb_fill_rx_data forms a complete skb for an ether frame
1398  * indicated by rxcp.
1399  */
1400 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1401                              struct be_rx_compl_info *rxcp)
1402 {
1403         struct be_queue_info *rxq = &rxo->q;
1404         struct be_rx_page_info *page_info;
1405         u16 i, j;
1406         u16 hdr_len, curr_frag_len, remaining;
1407         u8 *start;
1408
1409         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1410         start = page_address(page_info->page) + page_info->page_offset;
1411         prefetch(start);
1412
1413         /* Copy data in the first descriptor of this completion */
1414         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1415
1416         skb->len = curr_frag_len;
1417         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1418                 memcpy(skb->data, start, curr_frag_len);
1419                 /* Complete packet has now been moved to data */
1420                 put_page(page_info->page);
1421                 skb->data_len = 0;
1422                 skb->tail += curr_frag_len;
1423         } else {
1424                 hdr_len = ETH_HLEN;
1425                 memcpy(skb->data, start, hdr_len);
1426                 skb_shinfo(skb)->nr_frags = 1;
1427                 skb_frag_set_page(skb, 0, page_info->page);
1428                 skb_shinfo(skb)->frags[0].page_offset =
1429                                         page_info->page_offset + hdr_len;
1430                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1431                 skb->data_len = curr_frag_len - hdr_len;
1432                 skb->truesize += rx_frag_size;
1433                 skb->tail += hdr_len;
1434         }
1435         page_info->page = NULL;
1436
1437         if (rxcp->pkt_size <= rx_frag_size) {
1438                 BUG_ON(rxcp->num_rcvd != 1);
1439                 return;
1440         }
1441
1442         /* More frags present for this completion */
1443         index_inc(&rxcp->rxq_idx, rxq->len);
1444         remaining = rxcp->pkt_size - curr_frag_len;
1445         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1446                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1447                 curr_frag_len = min(remaining, rx_frag_size);
1448
1449                 /* Coalesce all frags from the same physical page in one slot */
1450                 if (page_info->page_offset == 0) {
1451                         /* Fresh page */
1452                         j++;
1453                         skb_frag_set_page(skb, j, page_info->page);
1454                         skb_shinfo(skb)->frags[j].page_offset =
1455                                                         page_info->page_offset;
1456                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1457                         skb_shinfo(skb)->nr_frags++;
1458                 } else {
1459                         put_page(page_info->page);
1460                 }
1461
1462                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1463                 skb->len += curr_frag_len;
1464                 skb->data_len += curr_frag_len;
1465                 skb->truesize += rx_frag_size;
1466                 remaining -= curr_frag_len;
1467                 index_inc(&rxcp->rxq_idx, rxq->len);
1468                 page_info->page = NULL;
1469         }
1470         BUG_ON(j >= MAX_SKB_FRAGS);
1471 }
1472
1473 /* Process the RX completion indicated by rxcp when GRO is disabled */
1474 static void be_rx_compl_process(struct be_rx_obj *rxo,
1475                                 struct be_rx_compl_info *rxcp)
1476 {
1477         struct be_adapter *adapter = rxo->adapter;
1478         struct net_device *netdev = adapter->netdev;
1479         struct sk_buff *skb;
1480
1481         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1482         if (unlikely(!skb)) {
1483                 rx_stats(rxo)->rx_drops_no_skbs++;
1484                 be_rx_compl_discard(rxo, rxcp);
1485                 return;
1486         }
1487
1488         skb_fill_rx_data(rxo, skb, rxcp);
1489
1490         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1491                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1492         else
1493                 skb_checksum_none_assert(skb);
1494
1495         skb->protocol = eth_type_trans(skb, netdev);
1496         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1497         if (netdev->features & NETIF_F_RXHASH)
1498                 skb->rxhash = rxcp->rss_hash;
1499
1501         if (rxcp->vlanf)
1502                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1503
1504         netif_receive_skb(skb);
1505 }
1506
1507 /* Process the RX completion indicated by rxcp when GRO is enabled */
1508 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1509                                     struct napi_struct *napi,
1510                                     struct be_rx_compl_info *rxcp)
1511 {
1512         struct be_adapter *adapter = rxo->adapter;
1513         struct be_rx_page_info *page_info;
1514         struct sk_buff *skb = NULL;
1515         struct be_queue_info *rxq = &rxo->q;
1516         u16 remaining, curr_frag_len;
1517         u16 i, j;
1518
1519         skb = napi_get_frags(napi);
1520         if (!skb) {
1521                 be_rx_compl_discard(rxo, rxcp);
1522                 return;
1523         }
1524
1525         remaining = rxcp->pkt_size;
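             /* j is a u16, so the -1 below wraps to 0xffff; the j++ done for
              * the first frag wraps it back to 0, the first frags[] slot.
              */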
1526         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1527                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1528
1529                 curr_frag_len = min(remaining, rx_frag_size);
1530
1531                 /* Coalesce all frags from the same physical page in one slot */
1532                 if (i == 0 || page_info->page_offset == 0) {
1533                         /* First frag or Fresh page */
1534                         j++;
1535                         skb_frag_set_page(skb, j, page_info->page);
1536                         skb_shinfo(skb)->frags[j].page_offset =
1537                                                         page_info->page_offset;
1538                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1539                 } else {
1540                         put_page(page_info->page);
1541                 }
1542                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1543                 skb->truesize += rx_frag_size;
1544                 remaining -= curr_frag_len;
1545                 index_inc(&rxcp->rxq_idx, rxq->len);
1546                 memset(page_info, 0, sizeof(*page_info));
1547         }
1548         BUG_ON(j >= MAX_SKB_FRAGS);
1549
1550         skb_shinfo(skb)->nr_frags = j + 1;
1551         skb->len = rxcp->pkt_size;
1552         skb->data_len = rxcp->pkt_size;
1553         skb->ip_summed = CHECKSUM_UNNECESSARY;
1554         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1555         if (adapter->netdev->features & NETIF_F_RXHASH)
1556                 skb->rxhash = rxcp->rss_hash;
1557
1558         if (rxcp->vlanf)
1559                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1560
1561         napi_gro_frags(napi);
1562 }
1563
1564 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1565                                  struct be_rx_compl_info *rxcp)
1566 {
1567         rxcp->pkt_size =
1568                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1569         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1570         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1571         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1572         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1573         rxcp->ip_csum =
1574                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1575         rxcp->l4_csum =
1576                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1577         rxcp->ipv6 =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1579         rxcp->rxq_idx =
1580                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1581         rxcp->num_rcvd =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1583         rxcp->pkt_type =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1585         rxcp->rss_hash =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1587         if (rxcp->vlanf) {
1588                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1589                                           compl);
1590                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1591                                                compl);
1592         }
1593         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1594 }
1595
1596 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1597                                  struct be_rx_compl_info *rxcp)
1598 {
1599         rxcp->pkt_size =
1600                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1601         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1602         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1603         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1604         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1605         rxcp->ip_csum =
1606                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1607         rxcp->l4_csum =
1608                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1609         rxcp->ipv6 =
1610                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1611         rxcp->rxq_idx =
1612                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1613         rxcp->num_rcvd =
1614                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1615         rxcp->pkt_type =
1616                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1617         rxcp->rss_hash =
1618                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1619         if (rxcp->vlanf) {
1620                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1621                                           compl);
1622                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1623                                                compl);
1624         }
1625         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1626         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1627                                       ip_frag, compl);
1628 }
1629
1630 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1631 {
1632         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1633         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1634         struct be_adapter *adapter = rxo->adapter;
1635
1636         /* For checking the valid bit it is OK to use either definition, as the
1637          * valid bit is at the same position in both v0 and v1 Rx compls */
1638         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1639                 return NULL;
1640
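             /* Ensure the valid bit is read before the rest of the
              * completion entry.
              */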
1641         rmb();
1642         be_dws_le_to_cpu(compl, sizeof(*compl));
1643
1644         if (adapter->be3_native)
1645                 be_parse_rx_compl_v1(compl, rxcp);
1646         else
1647                 be_parse_rx_compl_v0(compl, rxcp);
1648
1649         if (rxcp->ip_frag)
1650                 rxcp->l4_csum = 0;
1651
1652         if (rxcp->vlanf) {
1653                 /* vlanf could be wrongly set in some cards.
1654                  * Ignore it if vtm is not set */
1655                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1656                         rxcp->vlanf = 0;
1657
1658                 if (!lancer_chip(adapter))
1659                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1660
1661                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1662                     !adapter->vlan_tag[rxcp->vlan_tag])
1663                         rxcp->vlanf = 0;
1664         }
1665
1666         /* As the compl has been parsed, reset it; we won't touch it again */
1667         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1668
1669         queue_tail_inc(&rxo->cq);
1670         return rxcp;
1671 }
1672
1673 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1674 {
1675         u32 order = get_order(size);
1676
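             /* Request a compound page for order > 0 allocations so that
              * get_page()/put_page() refcounting on the head page covers
              * every rx_frag_size fragment carved out of it.
              */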
1677         if (order > 0)
1678                 gfp |= __GFP_COMP;
1679         return  alloc_pages(gfp, order);
1680 }
1681
1682 /*
1683  * Allocate a page, split it to fragments of size rx_frag_size and post as
1684  * receive buffers to BE
1685  */
1686 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1687 {
1688         struct be_adapter *adapter = rxo->adapter;
1689         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1690         struct be_queue_info *rxq = &rxo->q;
1691         struct page *pagep = NULL;
1692         struct be_eth_rx_d *rxd;
1693         u64 page_dmaaddr = 0, frag_dmaaddr;
1694         u32 posted, page_offset = 0;
1695
1696         page_info = &rxo->page_info_tbl[rxq->head];
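             /* Post up to MAX_RX_POST buffers, stopping early if the next
              * ring slot still owns a page, i.e. the ring is full.
              */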
1697         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1698                 if (!pagep) {
1699                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1700                         if (unlikely(!pagep)) {
1701                                 rx_stats(rxo)->rx_post_fail++;
1702                                 break;
1703                         }
1704                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1705                                                     0, adapter->big_page_size,
1706                                                     DMA_FROM_DEVICE);
1707                         page_info->page_offset = 0;
1708                 } else {
1709                         get_page(pagep);
1710                         page_info->page_offset = page_offset + rx_frag_size;
1711                 }
1712                 page_offset = page_info->page_offset;
1713                 page_info->page = pagep;
1714                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1715                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1716
1717                 rxd = queue_head_node(rxq);
1718                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1719                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1720
1721                 /* Any space left in the current big page for another frag? */
1722                 if ((page_offset + rx_frag_size + rx_frag_size) >
1723                                         adapter->big_page_size) {
1724                         pagep = NULL;
1725                         page_info->last_page_user = true;
1726                 }
1727
1728                 prev_page_info = page_info;
1729                 queue_head_inc(rxq);
1730                 page_info = &rxo->page_info_tbl[rxq->head];
1731         }
1732         if (pagep)
1733                 prev_page_info->last_page_user = true;
1734
1735         if (posted) {
1736                 atomic_add(posted, &rxq->used);
1737                 be_rxq_notify(adapter, rxq->id, posted);
1738         } else if (atomic_read(&rxq->used) == 0) {
1739                 /* Let be_worker replenish when memory is available */
1740                 rxo->rx_post_starved = true;
1741         }
1742 }
1743
1744 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1745 {
1746         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1747
1748         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1749                 return NULL;
1750
1751         rmb();
1752         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1753
1754         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1755
1756         queue_tail_inc(tx_cq);
1757         return txcp;
1758 }
1759
1760 static u16 be_tx_compl_process(struct be_adapter *adapter,
1761                 struct be_tx_obj *txo, u16 last_index)
1762 {
1763         struct be_queue_info *txq = &txo->q;
1764         struct be_eth_wrb *wrb;
1765         struct sk_buff **sent_skbs = txo->sent_skb_list;
1766         struct sk_buff *sent_skb;
1767         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1768         bool unmap_skb_hdr = true;
1769
1770         sent_skb = sent_skbs[txq->tail];
1771         BUG_ON(!sent_skb);
1772         sent_skbs[txq->tail] = NULL;
1773
1774         /* skip header wrb */
1775         queue_tail_inc(txq);
1776
1777         do {
1778                 cur_index = txq->tail;
1779                 wrb = queue_tail_node(txq);
1780                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1781                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1782                 unmap_skb_hdr = false;
1783
1784                 num_wrbs++;
1785                 queue_tail_inc(txq);
1786         } while (cur_index != last_index);
1787
1788         kfree_skb(sent_skb);
1789         return num_wrbs;
1790 }
1791
1792 /* Return the number of events in the event queue */
1793 static inline int events_get(struct be_eq_obj *eqo)
1794 {
1795         struct be_eq_entry *eqe;
1796         int num = 0;
1797
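             /* eqe->evt == 0 marks the first entry not yet written by the
              * hardware; consumed entries are zeroed below so they are not
              * counted twice.
              */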
1798         do {
1799                 eqe = queue_tail_node(&eqo->q);
1800                 if (eqe->evt == 0)
1801                         break;
1802
1803                 rmb();
1804                 eqe->evt = 0;
1805                 num++;
1806                 queue_tail_inc(&eqo->q);
1807         } while (true);
1808
1809         return num;
1810 }
1811
1812 /* Leaves the EQ in a disarmed state */
1813 static void be_eq_clean(struct be_eq_obj *eqo)
1814 {
1815         int num = events_get(eqo);
1816
1817         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1818 }
1819
1820 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1821 {
1822         struct be_rx_page_info *page_info;
1823         struct be_queue_info *rxq = &rxo->q;
1824         struct be_queue_info *rx_cq = &rxo->cq;
1825         struct be_rx_compl_info *rxcp;
1826         struct be_adapter *adapter = rxo->adapter;
1827         int flush_wait = 0;
1828         u16 tail;
1829
1830         /* Consume pending rx completions.
1831          * Wait for the flush completion (identified by zero num_rcvd)
1832          * to arrive. Notify the CQ even when there are no more CQ entries,
1833          * so that HW flushes partially coalesced CQ entries.
1834          * In Lancer, there is no need to wait for flush compl.
1835          */
1836         for (;;) {
1837                 rxcp = be_rx_compl_get(rxo);
1838                 if (rxcp == NULL) {
1839                         if (lancer_chip(adapter))
1840                                 break;
1841
1842                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1843                                 dev_warn(&adapter->pdev->dev,
1844                                          "did not receive flush compl\n");
1845                                 break;
1846                         }
1847                         be_cq_notify(adapter, rx_cq->id, true, 0);
1848                         mdelay(1);
1849                 } else {
1850                         be_rx_compl_discard(rxo, rxcp);
1851                         be_cq_notify(adapter, rx_cq->id, false, 1);
1852                         if (rxcp->num_rcvd == 0)
1853                                 break;
1854                 }
1855         }
1856
1857         /* After cleanup, leave the CQ in unarmed state */
1858         be_cq_notify(adapter, rx_cq->id, false, 0);
1859
1860         /* Then free posted rx buffers that were not used */
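             /* The oldest unconsumed buffer sits 'used' slots behind the
              * head, modulo the ring size.
              */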
1861         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1862         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1863                 page_info = get_rx_page_info(rxo, tail);
1864                 put_page(page_info->page);
1865                 memset(page_info, 0, sizeof(*page_info));
1866         }
1867         BUG_ON(atomic_read(&rxq->used));
1868         rxq->tail = rxq->head = 0;
1869 }
1870
1871 static void be_tx_compl_clean(struct be_adapter *adapter)
1872 {
1873         struct be_tx_obj *txo;
1874         struct be_queue_info *txq;
1875         struct be_eth_tx_compl *txcp;
1876         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1877         struct sk_buff *sent_skb;
1878         bool dummy_wrb;
1879         int i, pending_txqs;
1880
1881         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1882         do {
1883                 pending_txqs = adapter->num_tx_qs;
1884
1885                 for_all_tx_queues(adapter, txo, i) {
1886                         txq = &txo->q;
1887                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1888                                 end_idx =
1889                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1890                                                       wrb_index, txcp);
1891                                 num_wrbs += be_tx_compl_process(adapter, txo,
1892                                                                 end_idx);
1893                                 cmpl++;
1894                         }
1895                         if (cmpl) {
1896                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1897                                 atomic_sub(num_wrbs, &txq->used);
1898                                 cmpl = 0;
1899                                 num_wrbs = 0;
1900                         }
1901                         if (atomic_read(&txq->used) == 0)
1902                                 pending_txqs--;
1903                 }
1904
1905                 if (pending_txqs == 0 || ++timeo > 200)
1906                         break;
1907
1908                 mdelay(1);
1909         } while (true);
1910
1911         for_all_tx_queues(adapter, txo, i) {
1912                 txq = &txo->q;
1913                 if (atomic_read(&txq->used))
1914                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1915                                 atomic_read(&txq->used));
1916
1917                 /* free posted tx for which compls will never arrive */
1918                 while (atomic_read(&txq->used)) {
1919                         sent_skb = txo->sent_skb_list[txq->tail];
1920                         end_idx = txq->tail;
1921                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1922                                                    &dummy_wrb);
1923                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1924                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1925                         atomic_sub(num_wrbs, &txq->used);
1926                 }
1927         }
1928 }
1929
1930 static void be_evt_queues_destroy(struct be_adapter *adapter)
1931 {
1932         struct be_eq_obj *eqo;
1933         int i;
1934
1935         for_all_evt_queues(adapter, eqo, i) {
1936                 if (eqo->q.created) {
1937                         be_eq_clean(eqo);
1938                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1939                         netif_napi_del(&eqo->napi);
1940                 }
1941                 be_queue_free(adapter, &eqo->q);
1942         }
1943 }
1944
1945 static int be_evt_queues_create(struct be_adapter *adapter)
1946 {
1947         struct be_queue_info *eq;
1948         struct be_eq_obj *eqo;
1949         int i, rc;
1950
1951         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1952                                     adapter->cfg_num_qs);
1953
1954         for_all_evt_queues(adapter, eqo, i) {
1955                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1956                                BE_NAPI_WEIGHT);
1957                 eqo->adapter = adapter;
1958                 eqo->tx_budget = BE_TX_BUDGET;
1959                 eqo->idx = i;
1960                 eqo->max_eqd = BE_MAX_EQD;
1961                 eqo->enable_aic = true;
1962
1963                 eq = &eqo->q;
1964                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1965                                         sizeof(struct be_eq_entry));
1966                 if (rc)
1967                         return rc;
1968
1969                 rc = be_cmd_eq_create(adapter, eqo);
1970                 if (rc)
1971                         return rc;
1972         }
1973         return 0;
1974 }
1975
1976 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1977 {
1978         struct be_queue_info *q;
1979
1980         q = &adapter->mcc_obj.q;
1981         if (q->created)
1982                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1983         be_queue_free(adapter, q);
1984
1985         q = &adapter->mcc_obj.cq;
1986         if (q->created)
1987                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1988         be_queue_free(adapter, q);
1989 }
1990
1991 /* Must be called only after TX qs are created as MCC shares TX EQ */
1992 static int be_mcc_queues_create(struct be_adapter *adapter)
1993 {
1994         struct be_queue_info *q, *cq;
1995
1996         cq = &adapter->mcc_obj.cq;
1997         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1998                         sizeof(struct be_mcc_compl)))
1999                 goto err;
2000
2001         /* Use the default EQ for MCC completions */
2002         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2003                 goto mcc_cq_free;
2004
2005         q = &adapter->mcc_obj.q;
2006         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2007                 goto mcc_cq_destroy;
2008
2009         if (be_cmd_mccq_create(adapter, q, cq))
2010                 goto mcc_q_free;
2011
2012         return 0;
2013
2014 mcc_q_free:
2015         be_queue_free(adapter, q);
2016 mcc_cq_destroy:
2017         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2018 mcc_cq_free:
2019         be_queue_free(adapter, cq);
2020 err:
2021         return -1;
2022 }
2023
2024 static void be_tx_queues_destroy(struct be_adapter *adapter)
2025 {
2026         struct be_queue_info *q;
2027         struct be_tx_obj *txo;
2028         u8 i;
2029
2030         for_all_tx_queues(adapter, txo, i) {
2031                 q = &txo->q;
2032                 if (q->created)
2033                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2034                 be_queue_free(adapter, q);
2035
2036                 q = &txo->cq;
2037                 if (q->created)
2038                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2039                 be_queue_free(adapter, q);
2040         }
2041 }
2042
2043 static int be_tx_qs_create(struct be_adapter *adapter)
2044 {
2045         struct be_queue_info *cq, *eq;
2046         struct be_tx_obj *txo;
2047         int status, i;
2048
2049         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2050
2051         for_all_tx_queues(adapter, txo, i) {
2052                 cq = &txo->cq;
2053                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2054                                         sizeof(struct be_eth_tx_compl));
2055                 if (status)
2056                         return status;
2057
2058                 /* If num_evt_qs is less than num_tx_qs, then more than
2059                  * one txq shares an eq.
2060                  */
2061                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2062                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2063                 if (status)
2064                         return status;
2065
2066                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2067                                         sizeof(struct be_eth_wrb));
2068                 if (status)
2069                         return status;
2070
2071                 status = be_cmd_txq_create(adapter, txo);
2072                 if (status)
2073                         return status;
2074         }
2075
2076         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2077                  adapter->num_tx_qs);
2078         return 0;
2079 }
2080
2081 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2082 {
2083         struct be_queue_info *q;
2084         struct be_rx_obj *rxo;
2085         int i;
2086
2087         for_all_rx_queues(adapter, rxo, i) {
2088                 q = &rxo->cq;
2089                 if (q->created)
2090                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2091                 be_queue_free(adapter, q);
2092         }
2093 }
2094
2095 static int be_rx_cqs_create(struct be_adapter *adapter)
2096 {
2097         struct be_queue_info *eq, *cq;
2098         struct be_rx_obj *rxo;
2099         int rc, i;
2100
2101         /* We can create as many RSS rings as there are EQs. */
2102         adapter->num_rx_qs = adapter->num_evt_qs;
2103
2104         /* We'll use RSS only if at least 2 RSS rings are supported.
2105          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2106          */
2107         if (adapter->num_rx_qs > 1)
2108                 adapter->num_rx_qs++;
2109
2110         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2111         for_all_rx_queues(adapter, rxo, i) {
2112                 rxo->adapter = adapter;
2113                 cq = &rxo->cq;
2114                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2115                                 sizeof(struct be_eth_rx_compl));
2116                 if (rc)
2117                         return rc;
2118
2119                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2120                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2121                 if (rc)
2122                         return rc;
2123         }
2124
2125         dev_info(&adapter->pdev->dev,
2126                  "created %d RSS queue(s) and 1 default RX queue\n",
2127                  adapter->num_rx_qs - 1);
2128         return 0;
2129 }
2130
2131 static irqreturn_t be_intx(int irq, void *dev)
2132 {
2133         struct be_eq_obj *eqo = dev;
2134         struct be_adapter *adapter = eqo->adapter;
2135         int num_evts = 0;
2136
2137         /* IRQ is not expected when NAPI is scheduled as the EQ
2138          * will not be armed.
2139          * But, this can happen on Lancer INTx where it takes
2140          * a while to de-assert INTx or in BE2 where occasionally
2141          * an interrupt may be raised even when EQ is unarmed.
2142          * If NAPI is already scheduled, then counting & notifying
2143          * events will orphan them.
2144          */
2145         if (napi_schedule_prep(&eqo->napi)) {
2146                 num_evts = events_get(eqo);
2147                 __napi_schedule(&eqo->napi);
2148                 if (num_evts)
2149                         eqo->spurious_intr = 0;
2150         }
2151         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2152
2153         /* Return IRQ_HANDLED only for the first spurious intr
2154          * after a valid intr to stop the kernel from branding
2155          * this irq as a bad one!
2156          */
2157         if (num_evts || eqo->spurious_intr++ == 0)
2158                 return IRQ_HANDLED;
2159         else
2160                 return IRQ_NONE;
2161 }
2162
2163 static irqreturn_t be_msix(int irq, void *dev)
2164 {
2165         struct be_eq_obj *eqo = dev;
2166
2167         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2168         napi_schedule(&eqo->napi);
2169         return IRQ_HANDLED;
2170 }
2171
2172 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2173 {
2174         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2175 }
2176
2177 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2178                         int budget)
2179 {
2180         struct be_adapter *adapter = rxo->adapter;
2181         struct be_queue_info *rx_cq = &rxo->cq;
2182         struct be_rx_compl_info *rxcp;
2183         u32 work_done;
2184
2185         for (work_done = 0; work_done < budget; work_done++) {
2186                 rxcp = be_rx_compl_get(rxo);
2187                 if (!rxcp)
2188                         break;
2189
2190                 /* Is it a flush compl that has no data? */
2191                 if (unlikely(rxcp->num_rcvd == 0))
2192                         goto loop_continue;
2193
2194                 /* Discard compl with partial DMA Lancer B0 */
2195                 if (unlikely(!rxcp->pkt_size)) {
2196                         be_rx_compl_discard(rxo, rxcp);
2197                         goto loop_continue;
2198                 }
2199
2200                 /* On BE, drop pkts that arrive due to imperfect filtering in
2201                  * promiscuous mode on some SKUs.
2202                  */
2203                 if (unlikely(rxcp->port != adapter->port_num &&
2204                                 !lancer_chip(adapter))) {
2205                         be_rx_compl_discard(rxo, rxcp);
2206                         goto loop_continue;
2207                 }
2208
2209                 if (do_gro(rxcp))
2210                         be_rx_compl_process_gro(rxo, napi, rxcp);
2211                 else
2212                         be_rx_compl_process(rxo, rxcp);
2213 loop_continue:
2214                 be_rx_stats_update(rxo, rxcp);
2215         }
2216
2217         if (work_done) {
2218                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2219
2220                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2221                         be_post_rx_frags(rxo, GFP_ATOMIC);
2222         }
2223
2224         return work_done;
2225 }
2226
2227 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2228                           int budget, int idx)
2229 {
2230         struct be_eth_tx_compl *txcp;
2231         int num_wrbs = 0, work_done;
2232
2233         for (work_done = 0; work_done < budget; work_done++) {
2234                 txcp = be_tx_compl_get(&txo->cq);
2235                 if (!txcp)
2236                         break;
2237                 num_wrbs += be_tx_compl_process(adapter, txo,
2238                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2239                                         wrb_index, txcp));
2240         }
2241
2242         if (work_done) {
2243                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2244                 atomic_sub(num_wrbs, &txo->q.used);
2245
2246                 /* As Tx wrbs have been freed up, wake up netdev queue
2247                  * if it was stopped due to lack of tx wrbs.  */
2248                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2249                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2250                         netif_wake_subqueue(adapter->netdev, idx);
2251                 }
2252
2253                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2254                 tx_stats(txo)->tx_compl += work_done;
2255                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2256         }
2257         return (work_done < budget); /* Done */
2258 }
2259
2260 int be_poll(struct napi_struct *napi, int budget)
2261 {
2262         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2263         struct be_adapter *adapter = eqo->adapter;
2264         int max_work = 0, work, i, num_evts;
2265         bool tx_done;
2266
2267         num_evts = events_get(eqo);
2268
2269         /* Process all TXQs serviced by this EQ */
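             /* TXQ i is attached to EQ (i % num_evt_qs) at create time, so
              * each EQ walks the TXQ list with a stride of num_evt_qs.
              */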
2270         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2271                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2272                                         eqo->tx_budget, i);
2273                 if (!tx_done)
2274                         max_work = budget;
2275         }
2276
2277         /* This loop will iterate twice for EQ0, in which
2278          * completions of the last RXQ (the default one) are also processed.
2279          * For other EQs the loop iterates only once.
2280          */
2281         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2282                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2283                 max_work = max(work, max_work);
2284         }
2285
2286         if (is_mcc_eqo(eqo))
2287                 be_process_mcc(adapter);
2288
2289         if (max_work < budget) {
2290                 napi_complete(napi);
2291                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2292         } else {
2293                 /* As we'll continue in polling mode, count and clear events */
2294                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2295         }
2296         return max_work;
2297 }
2298
2299 void be_detect_error(struct be_adapter *adapter)
2300 {
2301         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2302         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2303         u32 i;
2304
2305         if (be_hw_error(adapter))
2306                 return;
2307
2308         if (lancer_chip(adapter)) {
2309                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2310                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2311                         sliport_err1 = ioread32(adapter->db +
2312                                         SLIPORT_ERROR1_OFFSET);
2313                         sliport_err2 = ioread32(adapter->db +
2314                                         SLIPORT_ERROR2_OFFSET);
2315                 }
2316         } else {
2317                 pci_read_config_dword(adapter->pdev,
2318                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2319                 pci_read_config_dword(adapter->pdev,
2320                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2321                 pci_read_config_dword(adapter->pdev,
2322                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2323                 pci_read_config_dword(adapter->pdev,
2324                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2325
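                     /* Bits set in the mask registers are expected and may be
                      * ignored; mask them off so only genuine UE bits remain.
                      */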
2326                 ue_lo = (ue_lo & ~ue_lo_mask);
2327                 ue_hi = (ue_hi & ~ue_hi_mask);
2328         }
2329
2330         /* On certain platforms BE hardware can indicate spurious UEs.
2331          * In case of a real UE the h/w will stop working on its own,
2332          * so hw_error is not set for UE detection.
2333          */
2334         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2335                 adapter->hw_error = true;
2336                 dev_err(&adapter->pdev->dev,
2337                         "Error detected in the card\n");
2341                 dev_err(&adapter->pdev->dev,
2342                         "ERR: sliport status 0x%x\n", sliport_status);
2343                 dev_err(&adapter->pdev->dev,
2344                         "ERR: sliport error1 0x%x\n", sliport_err1);
2345                 dev_err(&adapter->pdev->dev,
2346                         "ERR: sliport error2 0x%x\n", sliport_err2);
2347         }
2348
2349         if (ue_lo) {
2350                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2351                         if (ue_lo & 1)
2352                                 dev_err(&adapter->pdev->dev,
2353                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2354                 }
2355         }
2356
2357         if (ue_hi) {
2358                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2359                         if (ue_hi & 1)
2360                                 dev_err(&adapter->pdev->dev,
2361                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2362                 }
2363         }
2365 }
2366
2367 static void be_msix_disable(struct be_adapter *adapter)
2368 {
2369         if (msix_enabled(adapter)) {
2370                 pci_disable_msix(adapter->pdev);
2371                 adapter->num_msix_vec = 0;
2372                 adapter->num_msix_roce_vec = 0;
2373         }
2374 }
2375
2376 static int be_msix_enable(struct be_adapter *adapter)
2377 {
2378         int i, status, num_vec;
2379         struct device *dev = &adapter->pdev->dev;
2380
2381         /* If RoCE is supported, program the max number of NIC vectors that
2382          * may be configured via set-channels, along with vectors needed for
2383          * RoCE. Else, just program the number we'll use initially.
2384          */
2385         if (be_roce_supported(adapter))
2386                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2387                                 2 * num_online_cpus());
2388         else
2389                 num_vec = adapter->cfg_num_qs;
2390
2391         for (i = 0; i < num_vec; i++)
2392                 adapter->msix_entries[i].entry = i;
2393
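             /* In this kernel, pci_enable_msix() returns 0 on success, a
              * negative errno on failure, or a positive count of vectors
              * actually available; in the last case retry with that count.
              */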
2394         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2395         if (status == 0) {
2396                 goto done;
2397         } else if (status >= MIN_MSIX_VECTORS) {
2398                 num_vec = status;
2399                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2400                                          num_vec);
2401                 if (!status)
2402                         goto done;
2403         }
2404
2405         dev_warn(dev, "MSIx enable failed\n");
2406
2407         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2408         if (!be_physfn(adapter))
2409                 return status;
2410         return 0;
2411 done:
2412         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2413                 adapter->num_msix_roce_vec = num_vec / 2;
2414                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2415                          adapter->num_msix_roce_vec);
2416         }
2417
2418         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2419
2420         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2421                  adapter->num_msix_vec);
2422         return 0;
2423 }
2424
2425 static inline int be_msix_vec_get(struct be_adapter *adapter,
2426                                 struct be_eq_obj *eqo)
2427 {
2428         return adapter->msix_entries[eqo->msix_idx].vector;
2429 }
2430
2431 static int be_msix_register(struct be_adapter *adapter)
2432 {
2433         struct net_device *netdev = adapter->netdev;
2434         struct be_eq_obj *eqo;
2435         int status, i, vec;
2436
2437         for_all_evt_queues(adapter, eqo, i) {
2438                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2439                 vec = be_msix_vec_get(adapter, eqo);
2440                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2441                 if (status)
2442                         goto err_msix;
2443         }
2444
2445         return 0;
2446 err_msix:
2447         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2448                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2449         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2450                 status);
2451         be_msix_disable(adapter);
2452         return status;
2453 }
2454
2455 static int be_irq_register(struct be_adapter *adapter)
2456 {
2457         struct net_device *netdev = adapter->netdev;
2458         int status;
2459
2460         if (msix_enabled(adapter)) {
2461                 status = be_msix_register(adapter);
2462                 if (status == 0)
2463                         goto done;
2464                 /* INTx is not supported for VF */
2465                 if (!be_physfn(adapter))
2466                         return status;
2467         }
2468
2469         /* INTx: only the first EQ is used */
2470         netdev->irq = adapter->pdev->irq;
2471         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2472                              &adapter->eq_obj[0]);
2473         if (status) {
2474                 dev_err(&adapter->pdev->dev,
2475                         "INTx request IRQ failed - err %d\n", status);
2476                 return status;
2477         }
2478 done:
2479         adapter->isr_registered = true;
2480         return 0;
2481 }
2482
2483 static void be_irq_unregister(struct be_adapter *adapter)
2484 {
2485         struct net_device *netdev = adapter->netdev;
2486         struct be_eq_obj *eqo;
2487         int i;
2488
2489         if (!adapter->isr_registered)
2490                 return;
2491
2492         /* INTx */
2493         if (!msix_enabled(adapter)) {
2494                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2495                 goto done;
2496         }
2497
2498         /* MSIx */
2499         for_all_evt_queues(adapter, eqo, i)
2500                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2501
2502 done:
2503         adapter->isr_registered = false;
2504 }
2505
2506 static void be_rx_qs_destroy(struct be_adapter *adapter)
2507 {
2508         struct be_queue_info *q;
2509         struct be_rx_obj *rxo;
2510         int i;
2511
2512         for_all_rx_queues(adapter, rxo, i) {
2513                 q = &rxo->q;
2514                 if (q->created) {
2515                         be_cmd_rxq_destroy(adapter, q);
2516                         be_rx_cq_clean(rxo);
2517                 }
2518                 be_queue_free(adapter, q);
2519         }
2520 }
2521
2522 static int be_close(struct net_device *netdev)
2523 {
2524         struct be_adapter *adapter = netdev_priv(netdev);
2525         struct be_eq_obj *eqo;
2526         int i;
2527
2528         be_roce_dev_close(adapter);
2529
2530         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2531                 for_all_evt_queues(adapter, eqo, i)
2532                         napi_disable(&eqo->napi);
2533                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2534         }
2535
2536         be_async_mcc_disable(adapter);
2537
2538         /* Wait for all pending tx completions to arrive so that
2539          * all tx skbs are freed.
2540          */
2541         netif_tx_disable(netdev);
2542         be_tx_compl_clean(adapter);
2543
2544         be_rx_qs_destroy(adapter);
2545
2546         for_all_evt_queues(adapter, eqo, i) {
2547                 if (msix_enabled(adapter))
2548                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2549                 else
2550                         synchronize_irq(netdev->irq);
2551                 be_eq_clean(eqo);
2552         }
2553
2554         be_irq_unregister(adapter);
2555
2556         return 0;
2557 }
2558
2559 static int be_rx_qs_create(struct be_adapter *adapter)
2560 {
2561         struct be_rx_obj *rxo;
2562         int rc, i, j;
2563         u8 rsstable[128];
2564
2565         for_all_rx_queues(adapter, rxo, i) {
2566                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2567                                     sizeof(struct be_eth_rx_d));
2568                 if (rc)
2569                         return rc;
2570         }
2571
2572         /* The FW would like the default RXQ to be created first */
2573         rxo = default_rxo(adapter);
2574         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2575                                adapter->if_handle, false, &rxo->rss_id);
2576         if (rc)
2577                 return rc;
2578
2579         for_all_rss_queues(adapter, rxo, i) {
2580                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2581                                        rx_frag_size, adapter->if_handle,
2582                                        true, &rxo->rss_id);
2583                 if (rc)
2584                         return rc;
2585         }
2586
2587         if (be_multi_rxq(adapter)) {
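                     /* Stripe the RSS ring ids across the 128-entry RSS
                      * indirection table in round-robin order.
                      */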
2588                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2589                         for_all_rss_queues(adapter, rxo, i) {
2590                                 if ((j + i) >= 128)
2591                                         break;
2592                                 rsstable[j + i] = rxo->rss_id;
2593                         }
2594                 }
2595                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2596                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2597
2598                 if (!BEx_chip(adapter))
2599                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2600                                                 RSS_ENABLE_UDP_IPV6;
2601
2602                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2603                                        128);
2604                 if (rc) {
2605                         adapter->rss_flags = 0;
2606                         return rc;
2607                 }
2608         }
2609
2610         /* First time posting */
2611         for_all_rx_queues(adapter, rxo, i)
2612                 be_post_rx_frags(rxo, GFP_KERNEL);
2613         return 0;
2614 }
2615
2616 static int be_open(struct net_device *netdev)
2617 {
2618         struct be_adapter *adapter = netdev_priv(netdev);
2619         struct be_eq_obj *eqo;
2620         struct be_rx_obj *rxo;
2621         struct be_tx_obj *txo;
2622         u8 link_status;
2623         int status, i;
2624
2625         status = be_rx_qs_create(adapter);
2626         if (status)
2627                 goto err;
2628
2629         status = be_irq_register(adapter);
2630         if (status)
2631                 goto err;
2632
2633         for_all_rx_queues(adapter, rxo, i)
2634                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2635
2636         for_all_tx_queues(adapter, txo, i)
2637                 be_cq_notify(adapter, txo->cq.id, true, 0);
2638
2639         be_async_mcc_enable(adapter);
2640
2641         for_all_evt_queues(adapter, eqo, i) {
2642                 napi_enable(&eqo->napi);
2643                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2644         }
2645         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2646
2647         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2648         if (!status)
2649                 be_link_status_update(adapter, link_status);
2650
2651         netif_tx_start_all_queues(netdev);
2652         be_roce_dev_open(adapter);
2653         return 0;
2654 err:
2655         be_close(adapter->netdev);
2656         return -EIO;
2657 }
2658
2659 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2660 {
2661         struct be_dma_mem cmd;
2662         int status = 0;
2663         u8 mac[ETH_ALEN];
2664
2665         memset(mac, 0, ETH_ALEN);
2666
2667         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2668         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2669                                      GFP_KERNEL);
2670         if (cmd.va == NULL)
2671                 return -1;
2672
2673         if (enable) {
2674                 status = pci_write_config_dword(adapter->pdev,
2675                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2676                 if (status) {
2677                         dev_err(&adapter->pdev->dev,
2678                                 "Could not enable Wake-on-lan\n");
2679                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2680                                           cmd.dma);
2681                         return status;
2682                 }
2683                 status = be_cmd_enable_magic_wol(adapter,
2684                                 adapter->netdev->dev_addr, &cmd);
2685                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2686                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2687         } else {
2688                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2689                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2690                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2691         }
2692
2693         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2694         return status;
2695 }
2696
2697 /*
2698  * Generate a seed MAC address from the PF MAC Address using jhash.
2699  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2700  * These addresses are programmed in the ASIC by the PF and the VF driver
2701  * queries for the MAC address during its probe.
2702  */
2703 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2704 {
2705         u32 vf;
2706         int status = 0;
2707         u8 mac[ETH_ALEN];
2708         struct be_vf_cfg *vf_cfg;
2709
2710         be_vf_eth_addr_generate(adapter, mac);
2711
2712         for_all_vfs(adapter, vf_cfg, vf) {
2713                 if (BEx_chip(adapter))
2714                         status = be_cmd_pmac_add(adapter, mac,
2715                                                  vf_cfg->if_handle,
2716                                                  &vf_cfg->pmac_id, vf + 1);
2717                 else
2718                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2719                                                 vf + 1);
2720
2721                 if (status)
2722                         dev_err(&adapter->pdev->dev,
2723                         "Mac address assignment failed for VF %d\n", vf);
2724                 else
2725                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2726
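                     /* Derive the next VF MAC by bumping the last octet;
                      * this assumes the octet does not wrap for the
                      * configured number of VFs.
                      */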
2727                 mac[5] += 1;
2728         }
2729         return status;
2730 }
2731
2732 static int be_vfs_mac_query(struct be_adapter *adapter)
2733 {
2734         int status, vf;
2735         u8 mac[ETH_ALEN];
2736         struct be_vf_cfg *vf_cfg;
2737         bool active = false;
2738
2739         for_all_vfs(adapter, vf_cfg, vf) {
2740                 be_cmd_get_mac_from_list(adapter, mac, &active,
2741                                          &vf_cfg->pmac_id, 0);
2742
2743                 status = be_cmd_mac_addr_query(adapter, mac, false,
2744                                                vf_cfg->if_handle, 0);
2745                 if (status)
2746                         return status;
2747                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2748         }
2749         return 0;
2750 }
2751
2752 static void be_vf_clear(struct be_adapter *adapter)
2753 {
2754         struct be_vf_cfg *vf_cfg;
2755         u32 vf;
2756
2757         if (pci_vfs_assigned(adapter->pdev)) {
2758                 dev_warn(&adapter->pdev->dev,
2759                          "VFs are assigned to VMs: not disabling VFs\n");
2760                 goto done;
2761         }
2762
2763         pci_disable_sriov(adapter->pdev);
2764
2765         for_all_vfs(adapter, vf_cfg, vf) {
2766                 if (BEx_chip(adapter))
2767                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2768                                         vf_cfg->pmac_id, vf + 1);
2769                 else
2770                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2771                                        vf + 1);
2772
2773                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2774         }
2775 done:
2776         kfree(adapter->vf_cfg);
2777         adapter->num_vfs = 0;
2778 }
2779
2780 static void be_clear_queues(struct be_adapter *adapter)
2781 {
2782         be_mcc_queues_destroy(adapter);
2783         be_rx_cqs_destroy(adapter);
2784         be_tx_queues_destroy(adapter);
2785         be_evt_queues_destroy(adapter);
2786 }
2787
2788 static void be_cancel_worker(struct be_adapter *adapter)
2789 {
2790         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2791                 cancel_delayed_work_sync(&adapter->work);
2792                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2793         }
2794 }
2795
2796 static int be_clear(struct be_adapter *adapter)
2797 {
2798         int i;
2799
2800         be_cancel_worker(adapter);
2801
2802         if (sriov_enabled(adapter))
2803                 be_vf_clear(adapter);
2804
2805         /* delete the primary mac along with the uc-mac list */
2806         for (i = 0; i < (adapter->uc_macs + 1); i++)
2807                 be_cmd_pmac_del(adapter, adapter->if_handle,
2808                                 adapter->pmac_id[i], 0);
2809         adapter->uc_macs = 0;
2810
2811         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2812
2813         be_clear_queues(adapter);
2814
2815         kfree(adapter->pmac_id);
2816         adapter->pmac_id = NULL;
2817
2818         be_msix_disable(adapter);
2819         return 0;
2820 }
2821
2822 static int be_vfs_if_create(struct be_adapter *adapter)
2823 {
2824         struct be_resources res = {0};
2825         struct be_vf_cfg *vf_cfg;
2826         u32 cap_flags, en_flags, vf;
2827         int status = 0;
2828
2829         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2830                     BE_IF_FLAGS_MULTICAST;
2831
2832         for_all_vfs(adapter, vf_cfg, vf) {
2833                 if (!BE3_chip(adapter)) {
2834                         status = be_cmd_get_profile_config(adapter, &res,
2835                                                            vf + 1);
2836                         if (!status)
2837                                 cap_flags = res.if_cap_flags;
2838                 }
2839
2840                 /* If a FW profile exists, then cap_flags are updated */
2841                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2842                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2843                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2844                                           &vf_cfg->if_handle, vf + 1);
2845                 if (status)
2846                         goto err;
2847         }
2848 err:
2849         return status;
2850 }
2851
2852 static int be_vf_setup_init(struct be_adapter *adapter)
2853 {
2854         struct be_vf_cfg *vf_cfg;
2855         int vf;
2856
2857         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2858                                   GFP_KERNEL);
2859         if (!adapter->vf_cfg)
2860                 return -ENOMEM;
2861
2862         for_all_vfs(adapter, vf_cfg, vf) {
2863                 vf_cfg->if_handle = -1;
2864                 vf_cfg->pmac_id = -1;
2865         }
2866         return 0;
2867 }
2868
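     /* Top-level SR-IOV bring-up. Two cases are handled: if VFs survived
      * a PF driver reload (pci_num_vf() != 0), the existing if_handles
      * and MACs are queried back from FW; otherwise interfaces and MACs
      * are created afresh and pci_enable_sriov() is called at the end.
      *
      * Illustrative usage (assuming the driver is loaded as the be2net
      * module):
      *
      *   modprobe be2net num_vfs=4
      */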
2869 static int be_vf_setup(struct be_adapter *adapter)
2870 {
2871         struct be_vf_cfg *vf_cfg;
2872         u16 def_vlan, lnk_speed;
2873         int status, old_vfs, vf;
2874         struct device *dev = &adapter->pdev->dev;
2875         u32 privileges;
2876
2877         old_vfs = pci_num_vf(adapter->pdev);
2878         if (old_vfs) {
2879                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2880                 if (old_vfs != num_vfs)
2881                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2882                 adapter->num_vfs = old_vfs;
2883         } else {
2884                 if (num_vfs > be_max_vfs(adapter))
2885                         dev_info(dev, "Device supports %d VFs and not %d\n",
2886                                  be_max_vfs(adapter), num_vfs);
2887                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2888                 if (!adapter->num_vfs)
2889                         return 0;
2890         }
2891
2892         status = be_vf_setup_init(adapter);
2893         if (status)
2894                 goto err;
2895
2896         if (old_vfs) {
2897                 for_all_vfs(adapter, vf_cfg, vf) {
2898                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2899                         if (status)
2900                                 goto err;
2901                 }
2902         } else {
2903                 status = be_vfs_if_create(adapter);
2904                 if (status)
2905                         goto err;
2906         }
2907
2908         if (old_vfs) {
2909                 status = be_vfs_mac_query(adapter);
2910                 if (status)
2911                         goto err;
2912         } else {
2913                 status = be_vf_eth_addr_config(adapter);
2914                 if (status)
2915                         goto err;
2916         }
2917
2918         for_all_vfs(adapter, vf_cfg, vf) {
2919                 /* Allow VFs to program MAC/VLAN filters */
2920                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2921                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2922                         status = be_cmd_set_fn_privileges(adapter,
2923                                                           privileges |
2924                                                           BE_PRIV_FILTMGMT,
2925                                                           vf + 1);
2926                         if (!status)
2927                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2928                                          vf);
2929                 }
2930
2931                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2932                  * Allow the full available bandwidth.
2933                  */
2934                 if (BE3_chip(adapter) && !old_vfs)
2935                         be_cmd_set_qos(adapter, 1000, vf + 1);
2936
2937                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2938                                                   NULL, vf + 1);
2939                 if (!status)
2940                         vf_cfg->tx_rate = lnk_speed;
2941
2942                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2943                                                vf + 1, vf_cfg->if_handle, NULL);
2944                 if (status)
2945                         goto err;
2946                 vf_cfg->def_vid = def_vlan;
2947
2948                 be_cmd_enable_vf(adapter, vf + 1);
2949         }
2950
2951         if (!old_vfs) {
2952                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2953                 if (status) {
2954                         dev_err(dev, "SRIOV enable failed\n");
2955                         adapter->num_vfs = 0;
2956                         goto err;
2957                 }
2958         }
2959         return 0;
2960 err:
2961         dev_err(dev, "VF setup failed\n");
2962         be_vf_clear(adapter);
2963         return status;
2964 }
2965
2966 /* On BE2/BE3, FW does not suggest the supported limits; derive them here */
2967 static void BEx_get_resources(struct be_adapter *adapter,
2968                               struct be_resources *res)
2969 {
2970         struct pci_dev *pdev = adapter->pdev;
2971         bool use_sriov = false;
2972
2973         if (BE3_chip(adapter) && be_physfn(adapter)) {
2974                 int max_vfs;
2975
2976                 max_vfs = pci_sriov_get_totalvfs(pdev);
2977                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2978                 use_sriov = res->max_vfs && num_vfs;
2979         }
2980
2981         if (be_physfn(adapter))
2982                 res->max_uc_mac = BE_UC_PMAC_COUNT;
2983         else
2984                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2985
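             /* The VLAN filter table is a fixed HW resource; in the
              * multi-channel modes below it is effectively partitioned
              * among functions, so fewer VLANs are usable per function.
              */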
2986         if (adapter->function_mode & FLEX10_MODE)
2987                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2988         else if (adapter->function_mode & UMC_ENABLED)
2989                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2990         else
2991                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2992         res->max_mcast_mac = BE_MAX_MC;
2993
2994         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2995             !be_physfn(adapter))
2996                 res->max_tx_qs = 1;
2997         else
2998                 res->max_tx_qs = BE3_MAX_TX_QS;
2999
3000         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3001             !use_sriov && be_physfn(adapter))
3002                 res->max_rss_qs = (adapter->be3_native) ?
3003                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3004         res->max_rx_qs = res->max_rss_qs + 1;
3005
3006         res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
3007
3008         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3009         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3010                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3011 }
3012
3013 static void be_setup_init(struct be_adapter *adapter)
3014 {
3015         adapter->vlan_prio_bmap = 0xff;
3016         adapter->phy.link_speed = -1;
3017         adapter->if_handle = -1;
3018         adapter->be3_native = false;
3019         adapter->promiscuous = false;
3020         if (be_physfn(adapter))
3021                 adapter->cmd_privileges = MAX_PRIVILEGES;
3022         else
3023                 adapter->cmd_privileges = MIN_PRIVILEGES;
3024 }
3025
3026 static int be_get_resources(struct be_adapter *adapter)
3027 {
3028         struct device *dev = &adapter->pdev->dev;
3029         struct be_resources res = {0};
3030         int status;
3031
3032         if (BEx_chip(adapter)) {
3033                 BEx_get_resources(adapter, &res);
3034                 adapter->res = res;
3035         }
3036
3037         /* For BE3 only, check if FW suggests a different max-txqs value */
3038         if (BE3_chip(adapter)) {
3039                 status = be_cmd_get_profile_config(adapter, &res, 0);
3040                 if (!status && res.max_tx_qs)
3041                         adapter->res.max_tx_qs =
3042                                 min(adapter->res.max_tx_qs, res.max_tx_qs);
3043         }
3044
3045         /* For Lancer, SH etc. read per-function resource limits from FW.
3046          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3047          * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3048          */
3049         if (!BEx_chip(adapter)) {
3050                 status = be_cmd_get_func_config(adapter, &res);
3051                 if (status)
3052                         return status;
3053
3054                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3055                 if (be_roce_supported(adapter))
3056                         res.max_evt_qs /= 2;
3057                 adapter->res = res;
3058
3059                 if (be_physfn(adapter)) {
3060                         status = be_cmd_get_profile_config(adapter, &res, 0);
3061                         if (status)
3062                                 return status;
3063                         adapter->res.max_vfs = res.max_vfs;
3064                 }
3065
3066                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3067                          be_max_txqs(adapter), be_max_rxqs(adapter),
3068                          be_max_rss(adapter), be_max_eqs(adapter),
3069                          be_max_vfs(adapter));
3070                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3071                          be_max_uc(adapter), be_max_mc(adapter),
3072                          be_max_vlans(adapter));
3073         }
3074
3075         return 0;
3076 }
3077
3078 /* Routine to query per function resource limits */
3079 static int be_get_config(struct be_adapter *adapter)
3080 {
3081         int status;
3082
3083         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3084                                      &adapter->function_mode,
3085                                      &adapter->function_caps,
3086                                      &adapter->asic_rev);
3087         if (status)
3088                 return status;
3089
3090         status = be_get_resources(adapter);
3091         if (status)
3092                 return status;
3093
3094         /* primary mac needs 1 pmac entry */
3095         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3096                                    GFP_KERNEL);
3097         if (!adapter->pmac_id)
3098                 return -ENOMEM;
3099
3100         /* Sanitize cfg_num_qs based on HW and platform limits */
3101         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3102
3103         return 0;
3104 }
3105
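     /* Program the netdev MAC: on first init (dev_addr still zero) the
      * permanent MAC is queried from FW; otherwise the HW may have been
      * reset and the existing dev_addr is re-programmed.
      */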
3106 static int be_mac_setup(struct be_adapter *adapter)
3107 {
3108         u8 mac[ETH_ALEN];
3109         int status;
3110
3111         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3112                 status = be_cmd_get_perm_mac(adapter, mac);
3113                 if (status)
3114                         return status;
3115
3116                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3117                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3118         } else {
3119                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3120                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3121         }
3122
3123         /* On BE3 VFs this cmd may fail due to lack of privilege.
3124          * Ignore the failure as in this case pmac_id is fetched
3125          * in the IFACE_CREATE cmd.
3126          */
3127         be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3128                         &adapter->pmac_id[0], 0);
3129         return 0;
3130 }
3131
3132 static void be_schedule_worker(struct be_adapter *adapter)
3133 {
3134         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3135         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3136 }
3137
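     /* Create the EQs first, since the TX/RX/MCC queues bind to them,
      * then publish the real RX/TX queue counts to the stack. Callers
      * must hold rtnl_lock for netif_set_real_num_*_queues(); see the
      * rtnl_lock() around the call in be_setup().
      */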
3138 static int be_setup_queues(struct be_adapter *adapter)
3139 {
3140         struct net_device *netdev = adapter->netdev;
3141         int status;
3142
3143         status = be_evt_queues_create(adapter);
3144         if (status)
3145                 goto err;
3146
3147         status = be_tx_qs_create(adapter);
3148         if (status)
3149                 goto err;
3150
3151         status = be_rx_cqs_create(adapter);
3152         if (status)
3153                 goto err;
3154
3155         status = be_mcc_queues_create(adapter);
3156         if (status)
3157                 goto err;
3158
3159         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3160         if (status)
3161                 goto err;
3162
3163         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3164         if (status)
3165                 goto err;
3166
3167         return 0;
3168 err:
3169         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3170         return status;
3171 }
3172
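     /* Re-create all queues, e.g. after a queue-count change: close the
      * device if running, drop the worker and MSI-x vectors (unless the
      * vectors are shared with RoCE), rebuild the queues and re-open.
      */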
3173 int be_update_queues(struct be_adapter *adapter)
3174 {
3175         struct net_device *netdev = adapter->netdev;
3176         int status;
3177
3178         if (netif_running(netdev))
3179                 be_close(netdev);
3180
3181         be_cancel_worker(adapter);
3182
3183         /* If any vectors have been shared with RoCE we cannot re-program
3184          * the MSIx table.
3185          */
3186         if (!adapter->num_msix_roce_vec)
3187                 be_msix_disable(adapter);
3188
3189         be_clear_queues(adapter);
3190
3191         if (!msix_enabled(adapter)) {
3192                 status = be_msix_enable(adapter);
3193                 if (status)
3194                         return status;
3195         }
3196
3197         status = be_setup_queues(adapter);
3198         if (status)
3199                 return status;
3200
3201         be_schedule_worker(adapter);
3202
3203         if (netif_running(netdev))
3204                 status = be_open(netdev);
3205
3206         return status;
3207 }
3208
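     /* Function-level bring-up: query FW config and resource limits,
      * enable MSI-x, create the interface and queues, program the MAC,
      * restore VLAN/RX-mode state, sync flow-control with FW, and spawn
      * VFs if requested. Any failure unwinds through be_clear().
      */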
3209 static int be_setup(struct be_adapter *adapter)
3210 {
3211         struct device *dev = &adapter->pdev->dev;
3212         u32 tx_fc, rx_fc, en_flags;
3213         int status;
3214
3215         be_setup_init(adapter);
3216
3217         if (!lancer_chip(adapter))
3218                 be_cmd_req_native_mode(adapter);
3219
3220         status = be_get_config(adapter);
3221         if (status)
3222                 goto err;
3223
3224         status = be_msix_enable(adapter);
3225         if (status)
3226                 goto err;
3227
3228         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3229                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3230         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3231                 en_flags |= BE_IF_FLAGS_RSS;
3232         en_flags = en_flags & be_if_cap_flags(adapter);
3233         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3234                                   &adapter->if_handle, 0);
3235         if (status)
3236                 goto err;
3237
3238         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3239         rtnl_lock();
3240         status = be_setup_queues(adapter);
3241         rtnl_unlock();
3242         if (status)
3243                 goto err;
3244
3245         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3246         /* In UMC mode, FW does not return the right privileges.
3247          * Override with privileges equivalent to the PF.
3248          */
3249         if (be_is_mc(adapter))
3250                 adapter->cmd_privileges = MAX_PRIVILEGES;
3251
3252         status = be_mac_setup(adapter);
3253         if (status)
3254                 goto err;
3255
3256         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3257
3258         if (adapter->vlans_added)
3259                 be_vid_config(adapter);
3260
3261         be_set_rx_mode(adapter->netdev);
3262
3263         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3264
3265         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3266                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3267                                         adapter->rx_fc);
3268
3269         if (be_physfn(adapter) && num_vfs) {
3270                 if (be_max_vfs(adapter))
3271                         be_vf_setup(adapter);
3272                 else
3273                         dev_warn(dev, "device doesn't support SRIOV\n");
3274         }
3275
3276         status = be_cmd_get_phy_info(adapter);
3277         if (!status && be_pause_supported(adapter))
3278                 adapter->phy.fc_autoneg = 1;
3279
3280         be_schedule_worker(adapter);
3281         return 0;
3282 err:
3283         be_clear(adapter);
3284         return status;
3285 }
3286
3287 #ifdef CONFIG_NET_POLL_CONTROLLER
3288 static void be_netpoll(struct net_device *netdev)
3289 {
3290         struct be_adapter *adapter = netdev_priv(netdev);
3291         struct be_eq_obj *eqo;
3292         int i;
3293
3294         for_all_evt_queues(adapter, eqo, i) {
3295                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3296                 napi_schedule(&eqo->napi);
3297         }
3300 }
3301 #endif
3302
3303 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3304 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3305
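     /* Decide whether the boot-code (redboot) section must be flashed by
      * comparing the CRC in the last 4 bytes of the new image with the
      * CRC currently in flash; a matching CRC means flashing is skipped.
      */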
3306 static bool be_flash_redboot(struct be_adapter *adapter,
3307                         const u8 *p, u32 img_start, int image_size,
3308                         int hdr_size)
3309 {
3310         u32 crc_offset;
3311         u8 flashed_crc[4];
3312         int status;
3313
3314         crc_offset = hdr_size + img_start + image_size - 4;
3315
3316         p += crc_offset;
3317
3318         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3319                         (image_size - 4));
3320         if (status) {
3321                 dev_err(&adapter->pdev->dev,
3322                 "could not get crc from flash, not flashing redboot\n");
3323                 return false;
3324         }
3325
3326         /* update redboot only if crc does not match */
3327         return memcmp(flashed_crc, p, 4) != 0;
3331 }
3332
3333 static bool phy_flashing_required(struct be_adapter *adapter)
3334 {
3335         return (adapter->phy.phy_type == TN_8022 &&
3336                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3337 }
3338
3339 static bool is_comp_in_ufi(struct be_adapter *adapter,
3340                            struct flash_section_info *fsec, int type)
3341 {
3342         int i = 0, img_type = 0;
3343         struct flash_section_info_g2 *fsec_g2 = NULL;
3344
3345         if (BE2_chip(adapter))
3346                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3347
3348         for (i = 0; i < MAX_FLASH_COMP; i++) {
3349                 if (fsec_g2)
3350                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3351                 else
3352                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3353
3354                 if (img_type == type)
3355                         return true;
3356         }
3357         return false;
3359 }
3360
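     /* Locate the flash section directory in a UFI image by scanning at
      * 32-byte strides, past the file and image headers, for the 32-byte
      * flash_cookie signature ("SE FLASH DIRECTORY").
      */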
3361 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3362                                          int header_size,
3363                                          const struct firmware *fw)
3364 {
3365         struct flash_section_info *fsec = NULL;
3366         const u8 *p = fw->data;
3367
3368         p += header_size;
3369         while (p < (fw->data + fw->size)) {
3370                 fsec = (struct flash_section_info *)p;
3371                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3372                         return fsec;
3373                 p += 32;
3374         }
3375         return NULL;
3376 }
3377
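     /* Stream one image section to the adapter in 32KB chunks. All but
      * the last chunk use a SAVE op; the final chunk uses a FLASH op,
      * which commits the section. PHY images use the PHY variants of the
      * same ops, and ILLEGAL_IOCTL_REQ for a PHY image is tolerated
      * rather than treated as fatal.
      */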
3378 static int be_flash(struct be_adapter *adapter, const u8 *img,
3379                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3380 {
3381         u32 total_bytes = 0, flash_op, num_bytes = 0;
3382         int status = 0;
3383         struct be_cmd_write_flashrom *req = flash_cmd->va;
3384
3385         total_bytes = img_size;
3386         while (total_bytes) {
3387                 num_bytes = min_t(u32, 32*1024, total_bytes);
3388
3389                 total_bytes -= num_bytes;
3390
3391                 if (!total_bytes) {
3392                         if (optype == OPTYPE_PHY_FW)
3393                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3394                         else
3395                                 flash_op = FLASHROM_OPER_FLASH;
3396                 } else {
3397                         if (optype == OPTYPE_PHY_FW)
3398                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3399                         else
3400                                 flash_op = FLASHROM_OPER_SAVE;
3401                 }
3402
3403                 memcpy(req->data_buf, img, num_bytes);
3404                 img += num_bytes;
3405                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3406                                                 flash_op, num_bytes);
3407                 if (status) {
3408                         if (status == ILLEGAL_IOCTL_REQ &&
3409                             optype == OPTYPE_PHY_FW)
3410                                 break;
3411                         dev_err(&adapter->pdev->dev,
3412                                 "cmd to write to flash rom failed.\n");
3413                         return status;
3414                 }
3415         }
3416         return 0;
3417 }
3418
3419 /* For BE2, BE3 and BE3-R */
3420 static int be_flash_BEx(struct be_adapter *adapter,
3421                          const struct firmware *fw,
3422                          struct be_dma_mem *flash_cmd,
3423                          int num_of_images)
3425 {
3426         int status = 0, i, filehdr_size = 0;
3427         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3428         const u8 *p = fw->data;
3429         const struct flash_comp *pflashcomp;
3430         int num_comp, redboot;
3431         struct flash_section_info *fsec = NULL;
3432
3433         struct flash_comp gen3_flash_types[] = {
3434                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3435                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3436                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3437                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3438                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3439                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3440                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3441                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3442                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3443                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3444                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3445                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3446                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3447                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3448                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3449                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3450                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3451                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3452                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3453                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3454         };
3455
3456         struct flash_comp gen2_flash_types[] = {
3457                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3458                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3459                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3460                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3461                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3462                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3463                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3464                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3465                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3466                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3467                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3468                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3469                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3470                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3471                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3472                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3473         };
3474
3475         if (BE3_chip(adapter)) {
3476                 pflashcomp = gen3_flash_types;
3477                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3478                 num_comp = ARRAY_SIZE(gen3_flash_types);
3479         } else {
3480                 pflashcomp = gen2_flash_types;
3481                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3482                 num_comp = ARRAY_SIZE(gen2_flash_types);
3483         }
3484
3485         /* Get flash section info*/
3486         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3487         if (!fsec) {
3488                 dev_err(&adapter->pdev->dev,
3489                         "Invalid Cookie. UFI corrupted ?\n");
3490                 return -1;
3491         }
3492         for (i = 0; i < num_comp; i++) {
3493                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3494                         continue;
3495
3496                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3497                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3498                         continue;
3499
3500                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3501                     !phy_flashing_required(adapter))
3502                         continue;
3503
3504                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3505                         redboot = be_flash_redboot(adapter, fw->data,
3506                                 pflashcomp[i].offset, pflashcomp[i].size,
3507                                 filehdr_size + img_hdrs_size);
3508                         if (!redboot)
3509                                 continue;
3510                 }
3511
3512                 p = fw->data;
3513                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3514                 if (p + pflashcomp[i].size > fw->data + fw->size)
3515                         return -1;
3516
3517                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3518                                         pflashcomp[i].size);
3519                 if (status) {
3520                         dev_err(&adapter->pdev->dev,
3521                                 "Flashing section type %d failed.\n",
3522                                 pflashcomp[i].img_type);
3523                         return status;
3524                 }
3525         }
3526         return 0;
3527 }
3528
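     /* Skyhawk UFIs are self-describing: each flash section directory
      * entry carries its own type/offset/size, so no fixed per-chip
      * component table is needed as in be_flash_BEx().
      */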
3529 static int be_flash_skyhawk(struct be_adapter *adapter,
3530                 const struct firmware *fw,
3531                 struct be_dma_mem *flash_cmd, int num_of_images)
3532 {
3533         int status = 0, i, filehdr_size = 0;
3534         int img_offset, img_size, img_optype, redboot;
3535         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3536         const u8 *p = fw->data;
3537         struct flash_section_info *fsec = NULL;
3538
3539         filehdr_size = sizeof(struct flash_file_hdr_g3);
3540         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3541         if (!fsec) {
3542                 dev_err(&adapter->pdev->dev,
3543                         "Invalid Cookie. UFI corrupted ?\n");
3544                 return -1;
3545         }
3546
3547         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3548                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3549                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3550
3551                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3552                 case IMAGE_FIRMWARE_iSCSI:
3553                         img_optype = OPTYPE_ISCSI_ACTIVE;
3554                         break;
3555                 case IMAGE_BOOT_CODE:
3556                         img_optype = OPTYPE_REDBOOT;
3557                         break;
3558                 case IMAGE_OPTION_ROM_ISCSI:
3559                         img_optype = OPTYPE_BIOS;
3560                         break;
3561                 case IMAGE_OPTION_ROM_PXE:
3562                         img_optype = OPTYPE_PXE_BIOS;
3563                         break;
3564                 case IMAGE_OPTION_ROM_FCoE:
3565                         img_optype = OPTYPE_FCOE_BIOS;
3566                         break;
3567                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3568                         img_optype = OPTYPE_ISCSI_BACKUP;
3569                         break;
3570                 case IMAGE_NCSI:
3571                         img_optype = OPTYPE_NCSI_FW;
3572                         break;
3573                 default:
3574                         continue;
3575                 }
3576
3577                 if (img_optype == OPTYPE_REDBOOT) {
3578                         redboot = be_flash_redboot(adapter, fw->data,
3579                                         img_offset, img_size,
3580                                         filehdr_size + img_hdrs_size);
3581                         if (!redboot)
3582                                 continue;
3583                 }
3584
3585                 p = fw->data;
3586                 p += filehdr_size + img_offset + img_hdrs_size;
3587                 if (p + img_size > fw->data + fw->size)
3588                         return -1;
3589
3590                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3591                 if (status) {
3592                         dev_err(&adapter->pdev->dev,
3593                                 "Flashing section type %d failed.\n",
3594                                 le32_to_cpu(fsec->fsec_entry[i].type));
3595                         return status;
3596                 }
3597         }
3598         return 0;
3599 }
3600
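     /* Lancer FW is not flashed section-by-section: the whole image is
      * written with WRITE_OBJECT cmds to the "/prg" object in 32KB
      * chunks, then committed with a zero-length write. Depending on
      * change_status, FW is reset in place or a reboot is required for
      * the new image to take effect.
      */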
3601 static int lancer_fw_download(struct be_adapter *adapter,
3602                                 const struct firmware *fw)
3603 {
3604 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3605 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3606         struct be_dma_mem flash_cmd;
3607         const u8 *data_ptr = NULL;
3608         u8 *dest_image_ptr = NULL;
3609         size_t image_size = 0;
3610         u32 chunk_size = 0;
3611         u32 data_written = 0;
3612         u32 offset = 0;
3613         int status = 0;
3614         u8 add_status = 0;
3615         u8 change_status;
3616
3617         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3618                 dev_err(&adapter->pdev->dev,
3619                         "FW Image not properly aligned. "
3620                         "Length must be 4 byte aligned.\n");
3621                 status = -EINVAL;
3622                 goto lancer_fw_exit;
3623         }
3624
3625         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3626                                 + LANCER_FW_DOWNLOAD_CHUNK;
3627         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3628                                           &flash_cmd.dma, GFP_KERNEL);
3629         if (!flash_cmd.va) {
3630                 status = -ENOMEM;
3631                 goto lancer_fw_exit;
3632         }
3633
3634         dest_image_ptr = flash_cmd.va +
3635                                 sizeof(struct lancer_cmd_req_write_object);
3636         image_size = fw->size;
3637         data_ptr = fw->data;
3638
3639         while (image_size) {
3640                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3641
3642                 /* Copy the image chunk content. */
3643                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3644
3645                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3646                                                  chunk_size, offset,
3647                                                  LANCER_FW_DOWNLOAD_LOCATION,
3648                                                  &data_written, &change_status,
3649                                                  &add_status);
3650                 if (status)
3651                         break;
3652
3653                 offset += data_written;
3654                 data_ptr += data_written;
3655                 image_size -= data_written;
3656         }
3657
3658         if (!status) {
3659                 /* Commit the FW written */
3660                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3661                                                  0, offset,
3662                                                  LANCER_FW_DOWNLOAD_LOCATION,
3663                                                  &data_written, &change_status,
3664                                                  &add_status);
3665         }
3666
3667         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3668                                 flash_cmd.dma);
3669         if (status) {
3670                 dev_err(&adapter->pdev->dev,
3671                         "Firmware load error. "
3672                         "Status code: 0x%x Additional Status: 0x%x\n",
3673                         status, add_status);
3674                 goto lancer_fw_exit;
3675         }
3676
3677         if (change_status == LANCER_FW_RESET_NEEDED) {
3678                 status = lancer_physdev_ctrl(adapter,
3679                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3680                 if (status) {
3681                         dev_err(&adapter->pdev->dev,
3682                                 "Adapter busy for FW reset.\n"
3683                                 "New FW will not be active.\n");
3684                         goto lancer_fw_exit;
3685                 }
3686         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3687                 dev_err(&adapter->pdev->dev,
3688                         "System reboot required for new FW"
3689                         " to be active\n");
3690         }
3691
3692         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3693 lancer_fw_exit:
3694         return status;
3695 }
3696
3697 #define UFI_TYPE2               2
3698 #define UFI_TYPE3               3
3699 #define UFI_TYPE3R              10
3700 #define UFI_TYPE4               4
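     /* Match a UFI image to the chip: build '2' => BE2, build '3' => BE3
      * (asic_type_rev 0x10 denotes a BE3-R image), build '4' => Skyhawk.
      * be_fw_download() additionally refuses a plain BE3 UFI on BE3-R HW.
      */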
3701 static int be_get_ufi_type(struct be_adapter *adapter,
3702                            struct flash_file_hdr_g3 *fhdr)
3703 {
3704         if (!fhdr)
3705                 goto be_get_ufi_exit;
3706
3707         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3708                 return UFI_TYPE4;
3709         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3710                 if (fhdr->asic_type_rev == 0x10)
3711                         return UFI_TYPE3R;
3712                 else
3713                         return UFI_TYPE3;
3714         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3715                 return UFI_TYPE2;
3716
3717 be_get_ufi_exit:
3718         dev_err(&adapter->pdev->dev,
3719                 "UFI and Interface are not compatible for flashing\n");
3720         return -1;
3721 }
3722
3723 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3724 {
3725         struct flash_file_hdr_g3 *fhdr3;
3726         struct image_hdr *img_hdr_ptr = NULL;
3727         struct be_dma_mem flash_cmd;
3728         const u8 *p;
3729         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3730
3731         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3732         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3733                                           &flash_cmd.dma, GFP_KERNEL);
3734         if (!flash_cmd.va) {
3735                 status = -ENOMEM;
3736                 goto be_fw_exit;
3737         }
3738
3739         p = fw->data;
3740         fhdr3 = (struct flash_file_hdr_g3 *)p;
3741
3742         ufi_type = be_get_ufi_type(adapter, fhdr3);
3743
3744         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3745         for (i = 0; i < num_imgs; i++) {
3746                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3747                                 (sizeof(struct flash_file_hdr_g3) +
3748                                  i * sizeof(struct image_hdr)));
3749                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3750                         switch (ufi_type) {
3751                         case UFI_TYPE4:
3752                                 status = be_flash_skyhawk(adapter, fw,
3753                                                         &flash_cmd, num_imgs);
3754                                 break;
3755                         case UFI_TYPE3R:
3756                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3757                                                       num_imgs);
3758                                 break;
3759                         case UFI_TYPE3:
3760                                 /* Do not flash this ufi on BE3-R cards */
3761                                 if (adapter->asic_rev < 0x10)
3762                                         status = be_flash_BEx(adapter, fw,
3763                                                               &flash_cmd,
3764                                                               num_imgs);
3765                                 else {
3766                                         status = -1;
3767                                         dev_err(&adapter->pdev->dev,
3768                                                 "Can't load BE3 UFI on BE3R\n");
3769                                 }
3770                         }
3771                 }
3772         }
3773
3774         if (ufi_type == UFI_TYPE2)
3775                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3776         else if (ufi_type == -1)
3777                 status = -1;
3778
3779         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3780                           flash_cmd.dma);
3781         if (status) {
3782                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3783                 goto be_fw_exit;
3784         }
3785
3786         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3787
3788 be_fw_exit:
3789         return status;
3790 }
3791
3792 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3793 {
3794         const struct firmware *fw;
3795         int status;
3796
3797         if (!netif_running(adapter->netdev)) {
3798                 dev_err(&adapter->pdev->dev,
3799                         "Firmware load not allowed (interface is down)\n");
3800                 return -1;
3801         }
3802
3803         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3804         if (status)
3805                 goto fw_exit;
3806
3807         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3808
3809         if (lancer_chip(adapter))
3810                 status = lancer_fw_download(adapter, fw);
3811         else
3812                 status = be_fw_download(adapter, fw);
3813
3814         if (!status)
3815                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3816                                   adapter->fw_on_flash);
3817
3818 fw_exit:
3819         release_firmware(fw);
3820         return status;
3821 }
3822
3823 static int be_ndo_bridge_setlink(struct net_device *dev,
3824                                     struct nlmsghdr *nlh)
3825 {
3826         struct be_adapter *adapter = netdev_priv(dev);
3827         struct nlattr *attr, *br_spec;
3828         int rem;
3829         int status = 0;
3830         u16 mode = 0;
3831
3832         if (!sriov_enabled(adapter))
3833                 return -EOPNOTSUPP;
3834
3835         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3836         if (!br_spec) return -EINVAL;
3837         nla_for_each_nested(attr, br_spec, rem) {
3838                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3839                         continue;
3840
3841                 mode = nla_get_u16(attr);
3842                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3843                         return -EINVAL;
3844
3845                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3846                                                adapter->if_handle,
3847                                                mode == BRIDGE_MODE_VEPA ?
3848                                                PORT_FWD_TYPE_VEPA :
3849                                                PORT_FWD_TYPE_VEB);
3850                 if (status)
3851                         goto err;
3852
3853                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3854                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3855
3856                 return status;
3857         }
3858 err:
3859         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3860                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3861
3862         return status;
3863 }
3864
3865 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3866                                     struct net_device *dev,
3867                                     u32 filter_mask)
3868 {
3869         struct be_adapter *adapter = netdev_priv(dev);
3870         int status = 0;
3871         u8 hsw_mode;
3872
3873         if (!sriov_enabled(adapter))
3874                 return 0;
3875
3876         /* BE and Lancer chips support VEB mode only */
3877         if (BEx_chip(adapter) || lancer_chip(adapter)) {
3878                 hsw_mode = PORT_FWD_TYPE_VEB;
3879         } else {
3880                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3881                                                adapter->if_handle, &hsw_mode);
3882                 if (status)
3883                         return 0;
3884         }
3885
3886         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3887                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
3888                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3889 }
3890
3891 static const struct net_device_ops be_netdev_ops = {
3892         .ndo_open               = be_open,
3893         .ndo_stop               = be_close,
3894         .ndo_start_xmit         = be_xmit,
3895         .ndo_set_rx_mode        = be_set_rx_mode,
3896         .ndo_set_mac_address    = be_mac_addr_set,
3897         .ndo_change_mtu         = be_change_mtu,
3898         .ndo_get_stats64        = be_get_stats64,
3899         .ndo_validate_addr      = eth_validate_addr,
3900         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3901         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3902         .ndo_set_vf_mac         = be_set_vf_mac,
3903         .ndo_set_vf_vlan        = be_set_vf_vlan,
3904         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3905         .ndo_get_vf_config      = be_get_vf_config,
3906 #ifdef CONFIG_NET_POLL_CONTROLLER
3907         .ndo_poll_controller    = be_netpoll,
3908 #endif
3909         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
3910         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
3911 };
3912
3913 static void be_netdev_init(struct net_device *netdev)
3914 {
3915         struct be_adapter *adapter = netdev_priv(netdev);
3916
3917         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3918                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3919                 NETIF_F_HW_VLAN_CTAG_TX;
3920         if (be_multi_rxq(adapter))
3921                 netdev->hw_features |= NETIF_F_RXHASH;
3922
3923         netdev->features |= netdev->hw_features |
3924                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3925
3926         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3927                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3928
3929         netdev->priv_flags |= IFF_UNICAST_FLT;
3930
3931         netdev->flags |= IFF_MULTICAST;
3932
3933         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3934
3935         netdev->netdev_ops = &be_netdev_ops;
3936
3937         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3938 }
3939
3940 static void be_unmap_pci_bars(struct be_adapter *adapter)
3941 {
3942         if (adapter->csr)
3943                 pci_iounmap(adapter->pdev, adapter->csr);
3944         if (adapter->db)
3945                 pci_iounmap(adapter->pdev, adapter->db);
3946 }
3947
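     /* The doorbell BAR depends on the function type: Lancer and VFs
      * expose doorbells in BAR 0, while BEx/Skyhawk PFs use BAR 4. The
      * CSR window, when mapped at all, is BAR 2.
      */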
3948 static int db_bar(struct be_adapter *adapter)
3949 {
3950         if (lancer_chip(adapter) || !be_physfn(adapter))
3951                 return 0;
3952         else
3953                 return 4;
3954 }
3955
3956 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3957 {
3958         if (skyhawk_chip(adapter)) {
3959                 adapter->roce_db.size = 4096;
3960                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3961                                                               db_bar(adapter));
3962                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3963                                                                db_bar(adapter));
3964         }
3965         return 0;
3966 }
3967
3968 static int be_map_pci_bars(struct be_adapter *adapter)
3969 {
3970         u8 __iomem *addr;
3971         u32 sli_intf;
3972
3973         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3974         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3975                                 SLI_INTF_IF_TYPE_SHIFT;
3976
3977         if (BEx_chip(adapter) && be_physfn(adapter)) {
3978                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3979                 if (adapter->csr == NULL)
3980                         return -ENOMEM;
3981         }
3982
3983         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3984         if (addr == NULL)
3985                 goto pci_map_err;
3986         adapter->db = addr;
3987
3988         be_roce_map_pci_bars(adapter);
3989         return 0;
3990
3991 pci_map_err:
3992         be_unmap_pci_bars(adapter);
3993         return -ENOMEM;
3994 }
3995
3996 static void be_ctrl_cleanup(struct be_adapter *adapter)
3997 {
3998         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3999
4000         be_unmap_pci_bars(adapter);
4001
4002         if (mem->va)
4003                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4004                                   mem->dma);
4005
4006         mem = &adapter->rx_filter;
4007         if (mem->va)
4008                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4009                                   mem->dma);
4010 }
4011
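     /* Set up mailbox and RX-filter DMA memory. The mailbox must be
      * 16-byte aligned, so 16 extra bytes are allocated and the aligned
      * mailbox is carved out of the same buffer with PTR_ALIGN().
      */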
4012 static int be_ctrl_init(struct be_adapter *adapter)
4013 {
4014         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4015         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4016         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4017         u32 sli_intf;
4018         int status;
4019
4020         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4021         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4022                                  SLI_INTF_FAMILY_SHIFT;
4023         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4024
4025         status = be_map_pci_bars(adapter);
4026         if (status)
4027                 goto done;
4028
4029         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4030         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4031                                                 mbox_mem_alloc->size,
4032                                                 &mbox_mem_alloc->dma,
4033                                                 GFP_KERNEL);
4034         if (!mbox_mem_alloc->va) {
4035                 status = -ENOMEM;
4036                 goto unmap_pci_bars;
4037         }
4038         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4039         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4040         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4041         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4042
4043         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4044         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4045                                             rx_filter->size, &rx_filter->dma,
4046                                             GFP_KERNEL);
4047         if (rx_filter->va == NULL) {
4048                 status = -ENOMEM;
4049                 goto free_mbox;
4050         }
4051
4052         mutex_init(&adapter->mbox_lock);
4053         spin_lock_init(&adapter->mcc_lock);
4054         spin_lock_init(&adapter->mcc_cq_lock);
4055
4056         init_completion(&adapter->flash_compl);
4057         pci_save_state(adapter->pdev);
4058         return 0;
4059
4060 free_mbox:
4061         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4062                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4063
4064 unmap_pci_bars:
4065         be_unmap_pci_bars(adapter);
4066
4067 done:
4068         return status;
4069 }
4070
4071 static void be_stats_cleanup(struct be_adapter *adapter)
4072 {
4073         struct be_dma_mem *cmd = &adapter->stats_cmd;
4074
4075         if (cmd->va)
4076                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4077                                   cmd->va, cmd->dma);
4078 }
4079
4080 static int be_stats_init(struct be_adapter *adapter)
4081 {
4082         struct be_dma_mem *cmd = &adapter->stats_cmd;
4083
4084         if (lancer_chip(adapter))
4085                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4086         else if (BE2_chip(adapter))
4087                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4088         else
4089                 /* BE3 and Skyhawk */
4090                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4091
4092         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4093                                       GFP_KERNEL);
4094         if (cmd->va == NULL)
4095                 return -1;
4096         return 0;
4097 }
4098
4099 static void be_remove(struct pci_dev *pdev)
4100 {
4101         struct be_adapter *adapter = pci_get_drvdata(pdev);
4102
4103         if (!adapter)
4104                 return;
4105
4106         be_roce_dev_remove(adapter);
4107         be_intr_set(adapter, false);
4108
4109         cancel_delayed_work_sync(&adapter->func_recovery_work);
4110
4111         unregister_netdev(adapter->netdev);
4112
4113         be_clear(adapter);
4114
4115         /* tell fw we're done with firing cmds */
4116         be_cmd_fw_clean(adapter);
4117
4118         be_stats_cleanup(adapter);
4119
4120         be_ctrl_cleanup(adapter);
4121
4122         pci_disable_pcie_error_reporting(pdev);
4123
4124         pci_set_drvdata(pdev, NULL);
4125         pci_release_regions(pdev);
4126         pci_disable_device(pdev);
4127
4128         free_netdev(adapter->netdev);
4129 }
4130
4131 bool be_is_wol_supported(struct be_adapter *adapter)
4132 {
4133         return (adapter->wol_cap & BE_WOL_CAP) &&
4134                 !be_is_wol_excluded(adapter);
4135 }
4136
4137 u32 be_get_fw_log_level(struct be_adapter *adapter)
4138 {
4139         struct be_dma_mem extfat_cmd;
4140         struct be_fat_conf_params *cfgs;
4141         int status;
4142         u32 level = 0;
4143         int j;
4144
4145         if (lancer_chip(adapter))
4146                 return 0;
4147
4148         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4149         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4150         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4151                                              &extfat_cmd.dma);
4152
4153         if (!extfat_cmd.va) {
4154                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4155                         __func__);
4156                 goto err;
4157         }
4158
4159         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4160         if (!status) {
4161                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4162                                                 sizeof(struct be_cmd_resp_hdr));
4163                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4164                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4165                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4166                 }
4167         }
4168         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4169                             extfat_cmd.dma);
4170 err:
4171         return level;
4172 }
4173
4174 static int be_get_initial_config(struct be_adapter *adapter)
4175 {
4176         int status;
4177         u32 level;
4178
4179         status = be_cmd_get_cntl_attributes(adapter);
4180         if (status)
4181                 return status;
4182
4183         status = be_cmd_get_acpi_wol_cap(adapter);
4184         if (status) {
4185                 /* in case of a failure to get WOL capabilities,
4186                  * check the exclusion list to determine WOL capability */
4187                 if (!be_is_wol_excluded(adapter))
4188                         adapter->wol_cap |= BE_WOL_CAP;
4189         }
4190
4191         if (be_is_wol_supported(adapter))
4192                 adapter->wol = true;
4193
4194         /* Must be a power of 2 or else MODULO will BUG_ON */
4195         adapter->be_get_temp_freq = 64;
4196
4197         level = be_get_fw_log_level(adapter);
4198         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4199
4200         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4201         return 0;
4202 }
4203
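     /* Recover a Lancer function after a FW error: wait for FW to become
      * ready again, then rebuild the function from scratch (be_clear() +
      * be_setup()), re-opening the netdev if it was running.
      */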
4204 static int lancer_recover_func(struct be_adapter *adapter)
4205 {
4206         struct device *dev = &adapter->pdev->dev;
4207         int status;
4208
4209         status = lancer_test_and_set_rdy_state(adapter);
4210         if (status)
4211                 goto err;
4212
4213         if (netif_running(adapter->netdev))
4214                 be_close(adapter->netdev);
4215
4216         be_clear(adapter);
4217
4218         be_clear_all_error(adapter);
4219
4220         status = be_setup(adapter);
4221         if (status)
4222                 goto err;
4223
4224         if (netif_running(adapter->netdev)) {
4225                 status = be_open(adapter->netdev);
4226                 if (status)
4227                         goto err;
4228         }
4229
4230         dev_err(dev, "Error recovery successful\n");
4231         return 0;
4232 err:
4233         if (status == -EAGAIN)
4234                 dev_err(dev, "Waiting for resource provisioning\n");
4235         else
4236                 dev_err(dev, "Error recovery failed\n");
4237
4238         return status;
4239 }
4240
4241 static void be_func_recovery_task(struct work_struct *work)
4242 {
4243         struct be_adapter *adapter =
4244                 container_of(work, struct be_adapter,  func_recovery_work.work);
4245         int status = 0;
4246
4247         be_detect_error(adapter);
4248
4249         if (adapter->hw_error && lancer_chip(adapter)) {
4250                 rtnl_lock();
4252                 netif_device_detach(adapter->netdev);
4253                 rtnl_unlock();
4254
4255                 status = lancer_recover_func(adapter);
4256                 if (!status)
4257                         netif_device_attach(adapter->netdev);
4258         }
4259
4260         /* On Lancer, for any error other than a provisioning error
4261          * (-EAGAIN) there is no need to attempt further recovery.
4262          */
4263         if (!status || status == -EAGAIN)
4264                 schedule_delayed_work(&adapter->func_recovery_work,
4265                                       msecs_to_jiffies(1000));
4266 }
4267
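/* Periodic (1s) housekeeping: reap MCC completions while interrupts
 * are off, refresh stats and die temperature, replenish starved RX
 * rings and adapt EQ interrupt delays.
 */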
4268 static void be_worker(struct work_struct *work)
4269 {
4270         struct be_adapter *adapter =
4271                 container_of(work, struct be_adapter, work.work);
4272         struct be_rx_obj *rxo;
4273         struct be_eq_obj *eqo;
4274         int i;
4275
4276         /* when interrupts are not yet enabled, just reap any pending
4277          * mcc completions */
4278         if (!netif_running(adapter->netdev)) {
4279                 local_bh_disable();
4280                 be_process_mcc(adapter);
4281                 local_bh_enable();
4282                 goto reschedule;
4283         }
4284
4285         if (!adapter->stats_cmd_sent) {
4286                 if (lancer_chip(adapter))
4287                         lancer_cmd_get_pport_stats(adapter,
4288                                                    &adapter->stats_cmd);
4289                 else
4290                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4291         }
4292
4293         if (be_physfn(adapter) &&
4294             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4295                 be_cmd_get_die_temperature(adapter);
4296
4297         for_all_rx_queues(adapter, rxo, i) {
4298                 if (rxo->rx_post_starved) {
4299                         rxo->rx_post_starved = false;
4300                         be_post_rx_frags(rxo, GFP_KERNEL);
4301                 }
4302         }
4303
4304         for_all_evt_queues(adapter, eqo, i)
4305                 be_eqd_update(adapter, eqo);
4306
4307 reschedule:
4308         adapter->work_counter++;
4309         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4310 }
4311
4312 /* If any VFs are already enabled don't FLR the PF */
4313 static bool be_reset_required(struct be_adapter *adapter)
4314 {
4315         return pci_num_vf(adapter->pdev) == 0;
4316 }
4317
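/* Name of the active multi-channel mode (FLEX10/vNIC/UMC), if any,
 * for the probe banner.
 */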
4318 static char *mc_name(struct be_adapter *adapter)
4319 {
4320         if (adapter->function_mode & FLEX10_MODE)
4321                 return "FLEX10";
4322         else if (adapter->function_mode & VNIC_MODE)
4323                 return "vNIC";
4324         else if (adapter->function_mode & UMC_ENABLED)
4325                 return "UMC";
4326         else
4327                 return "";
4328 }
4329
4330 static inline char *func_name(struct be_adapter *adapter)
4331 {
4332         return be_physfn(adapter) ? "PF" : "VF";
4333 }
4334
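/* PCI probe: enable the device, negotiate DMA masks, sync with
 * firmware, then create queues via be_setup() and register the netdev.
 */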
4335 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4336 {
4337         int status = 0;
4338         struct be_adapter *adapter;
4339         struct net_device *netdev;
4340         char port_name;
4341
4342         status = pci_enable_device(pdev);
4343         if (status)
4344                 goto do_none;
4345
4346         status = pci_request_regions(pdev, DRV_NAME);
4347         if (status)
4348                 goto disable_dev;
4349         pci_set_master(pdev);
4350
4351         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4352         if (!netdev) {
4353                 status = -ENOMEM;
4354                 goto rel_reg;
4355         }
4356         adapter = netdev_priv(netdev);
4357         adapter->pdev = pdev;
4358         pci_set_drvdata(pdev, adapter);
4359         adapter->netdev = netdev;
4360         SET_NETDEV_DEV(netdev, &pdev->dev);
4361
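        /* Prefer a 64-bit DMA mask; fall back to 32-bit if the
         * platform cannot support it.
         */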
4362         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4363         if (!status) {
4364                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4365                 if (status < 0) {
4366                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4367                         goto free_netdev;
4368                 }
4369                 netdev->features |= NETIF_F_HIGHDMA;
4370         } else {
4371                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4372                 if (!status)
4373                         status = dma_set_coherent_mask(&pdev->dev,
4374                                                        DMA_BIT_MASK(32));
4375                 if (status) {
4376                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4377                         goto free_netdev;
4378                 }
4379         }
4380
4381         status = pci_enable_pcie_error_reporting(pdev);
4382         if (status)
4383                 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4384
4385         status = be_ctrl_init(adapter);
4386         if (status)
4387                 goto free_netdev;
4388
4389         /* sync up with fw's ready state */
4390         if (be_physfn(adapter)) {
4391                 status = be_fw_wait_ready(adapter);
4392                 if (status)
4393                         goto ctrl_clean;
4394         }
4395
4396         if (be_reset_required(adapter)) {
4397                 status = be_cmd_reset_function(adapter);
4398                 if (status)
4399                         goto ctrl_clean;
4400
4401                 /* Wait for interrupts to quiesce after an FLR */
4402                 msleep(100);
4403         }
4404
4405         /* Allow interrupts for other ULPs running on NIC function */
4406         be_intr_set(adapter, true);
4407
4408         /* tell fw we're ready to fire cmds */
4409         status = be_cmd_fw_init(adapter);
4410         if (status)
4411                 goto ctrl_clean;
4412
4413         status = be_stats_init(adapter);
4414         if (status)
4415                 goto ctrl_clean;
4416
4417         status = be_get_initial_config(adapter);
4418         if (status)
4419                 goto stats_clean;
4420
4421         INIT_DELAYED_WORK(&adapter->work, be_worker);
4422         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4423         adapter->rx_fc = adapter->tx_fc = true;
4424
4425         status = be_setup(adapter);
4426         if (status)
4427                 goto stats_clean;
4428
4429         be_netdev_init(netdev);
4430         status = register_netdev(netdev);
4431         if (status != 0)
4432                 goto unsetup;
4433
4434         be_roce_dev_add(adapter);
4435
4436         schedule_delayed_work(&adapter->func_recovery_work,
4437                               msecs_to_jiffies(1000));
4438
4439         be_cmd_query_port_name(adapter, &port_name);
4440
4441         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4442                  func_name(adapter), mc_name(adapter), port_name);
4443
4444         return 0;
4445
4446 unsetup:
4447         be_clear(adapter);
4448 stats_clean:
4449         be_stats_cleanup(adapter);
4450 ctrl_clean:
4451         be_ctrl_cleanup(adapter);
4452 free_netdev:
4453         free_netdev(netdev);
4454         pci_set_drvdata(pdev, NULL);
4455 rel_reg:
4456         pci_release_regions(pdev);
4457 disable_dev:
4458         pci_disable_device(pdev);
4459 do_none:
4460         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4461         return status;
4462 }
4463
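/* Legacy PM suspend: arm WOL if enabled, stop the recovery worker and
 * release all resources before the device enters a low-power state.
 */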
4464 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4465 {
4466         struct be_adapter *adapter = pci_get_drvdata(pdev);
4467         struct net_device *netdev = adapter->netdev;
4468
4469         if (adapter->wol)
4470                 be_setup_wol(adapter, true);
4471
4472         cancel_delayed_work_sync(&adapter->func_recovery_work);
4473
4474         netif_device_detach(netdev);
4475         if (netif_running(netdev)) {
4476                 rtnl_lock();
4477                 be_close(netdev);
4478                 rtnl_unlock();
4479         }
4480         be_clear(adapter);
4481
4482         pci_save_state(pdev);
4483         pci_disable_device(pdev);
4484         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4485         return 0;
4486 }
4487
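/* Legacy PM resume: re-enable the device, wait for firmware, rebuild
 * resources and restart the recovery worker; undoes be_suspend().
 */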
4488 static int be_resume(struct pci_dev *pdev)
4489 {
4490         int status = 0;
4491         struct be_adapter *adapter = pci_get_drvdata(pdev);
4492         struct net_device *netdev = adapter->netdev;
4493
4494         netif_device_detach(netdev);
4495
4496         status = pci_enable_device(pdev);
4497         if (status)
4498                 return status;
4499
4500         pci_set_power_state(pdev, PCI_D0);
4501         pci_restore_state(pdev);
4502
4503         status = be_fw_wait_ready(adapter);
4504         if (status)
4505                 return status;
4506
4507         /* tell fw we're ready to fire cmds */
4508         status = be_cmd_fw_init(adapter);
4509         if (status)
4510                 return status;
4511
4512         be_setup(adapter);
4513         if (netif_running(netdev)) {
4514                 rtnl_lock();
4515                 be_open(netdev);
4516                 rtnl_unlock();
4517         }
4518
4519         schedule_delayed_work(&adapter->func_recovery_work,
4520                               msecs_to_jiffies(1000));
4521         netif_device_attach(netdev);
4522
4523         if (adapter->wol)
4524                 be_setup_wol(adapter, false);
4525
4526         return 0;
4527 }
4528
4529 /* An FLR stops the controller from DMAing any further data; issue one
4530  * before the system reboots or powers off.
4531  */
4532 static void be_shutdown(struct pci_dev *pdev)
4533 {
4534         struct be_adapter *adapter = pci_get_drvdata(pdev);
4535
4536         if (!adapter)
4537                 return;
4538
4539         cancel_delayed_work_sync(&adapter->work);
4540         cancel_delayed_work_sync(&adapter->func_recovery_work);
4541
4542         netif_device_detach(adapter->netdev);
4543
4544         be_cmd_reset_function(adapter);
4545
4546         pci_disable_device(pdev);
4547 }
4548
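/* First stage of EEH recovery: quiesce the driver and tell the PCI
 * core whether a slot reset may recover the device.
 */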
4549 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4550                                 pci_channel_state_t state)
4551 {
4552         struct be_adapter *adapter = pci_get_drvdata(pdev);
4553         struct net_device *netdev = adapter->netdev;
4554
4555         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4556
4557         if (!adapter->eeh_error) {
4558                 adapter->eeh_error = true;
4559
4560                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4561
4562                 rtnl_lock();
4563                 netif_device_detach(netdev);
4564                 if (netif_running(netdev))
4565                         be_close(netdev);
4566                 rtnl_unlock();
4567
4568                 be_clear(adapter);
4569         }
4570
4571         if (state == pci_channel_io_perm_failure)
4572                 return PCI_ERS_RESULT_DISCONNECT;
4573
4574         pci_disable_device(pdev);
4575
4576         /* The error could cause the FW to trigger a flash debug dump.
4577          * Resetting the card while flash dump is in progress
4578          * can cause it not to recover; wait for it to finish.
4579          * Wait only for first function as it is needed only once per
4580          * adapter.
4581          */
4582         if (pdev->devfn == 0)
4583                 ssleep(30);
4584
4585         return PCI_ERS_RESULT_NEED_RESET;
4586 }
4587
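/* Second stage of EEH recovery: re-enable the device after the slot
 * reset and wait for firmware to become ready again.
 */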
4588 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4589 {
4590         struct be_adapter *adapter = pci_get_drvdata(pdev);
4591         int status;
4592
4593         dev_info(&adapter->pdev->dev, "EEH reset\n");
4594
4595         status = pci_enable_device(pdev);
4596         if (status)
4597                 return PCI_ERS_RESULT_DISCONNECT;
4598
4599         pci_set_master(pdev);
4600         pci_set_power_state(pdev, PCI_D0);
4601         pci_restore_state(pdev);
4602
4603         /* Check if card is ok and fw is ready */
4604         dev_info(&adapter->pdev->dev,
4605                  "Waiting for FW to be ready after EEH reset\n");
4606         status = be_fw_wait_ready(adapter);
4607         if (status)
4608                 return PCI_ERS_RESULT_DISCONNECT;
4609
4610         pci_cleanup_aer_uncorrect_error_status(pdev);
4611         be_clear_all_error(adapter);
4612         return PCI_ERS_RESULT_RECOVERED;
4613 }
4614
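/* Final stage of EEH recovery: re-initialize the function and restart
 * traffic and the recovery worker.
 */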
4615 static void be_eeh_resume(struct pci_dev *pdev)
4616 {
4617         int status = 0;
4618         struct be_adapter *adapter = pci_get_drvdata(pdev);
4619         struct net_device *netdev = adapter->netdev;
4620
4621         dev_info(&adapter->pdev->dev, "EEH resume\n");
4622
4623         pci_save_state(pdev);
4624
4625         status = be_cmd_reset_function(adapter);
4626         if (status)
4627                 goto err;
4628
4629         /* tell fw we're ready to fire cmds */
4630         status = be_cmd_fw_init(adapter);
4631         if (status)
4632                 goto err;
4633
4634         status = be_setup(adapter);
4635         if (status)
4636                 goto err;
4637
4638         if (netif_running(netdev)) {
4639                 status = be_open(netdev);
4640                 if (status)
4641                         goto err;
4642         }
4643
4644         schedule_delayed_work(&adapter->func_recovery_work,
4645                               msecs_to_jiffies(1000));
4646         netif_device_attach(netdev);
4647         return;
4648 err:
4649         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4650 }
4651
4652 static const struct pci_error_handlers be_eeh_handlers = {
4653         .error_detected = be_eeh_err_detected,
4654         .slot_reset = be_eeh_reset,
4655         .resume = be_eeh_resume,
4656 };
4657
4658 static struct pci_driver be_driver = {
4659         .name = DRV_NAME,
4660         .id_table = be_dev_ids,
4661         .probe = be_probe,
4662         .remove = be_remove,
4663         .suspend = be_suspend,
4664         .resume = be_resume,
4665         .shutdown = be_shutdown,
4666         .err_handler = &be_eeh_handlers
4667 };
4668
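/* Validate module parameters before registering the PCI driver */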
4669 static int __init be_init_module(void)
4670 {
4671         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4672             rx_frag_size != 2048) {
4673                 pr_warn(DRV_NAME
4674                         " : Module param rx_frag_size must be 2048/4096/8192."
4675                         " Using 2048\n");
4676                 rx_frag_size = 2048;
4677         }
4678
4679         return pci_register_driver(&be_driver);
4680 }
4681 module_init(be_init_module);
4682
4683 static void __exit be_exit_module(void)
4684 {
4685         pci_unregister_driver(&be_driver);
4686 }
4687 module_exit(be_exit_module);