be2net: Avoid flashing BE3 UFI on BE3-R chip.
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

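/* Free the DMA-coherent memory that backs a queue's ring */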
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

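/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR control register in PCI config space; a write is issued only
 * when the requested state differs from the current one.
 */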
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

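/* Ring the RX queue doorbell to hand 'posted' new buffers to HW */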
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

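/* Ring the TX queue doorbell to notify HW of newly posted WRBs */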
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

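/* Ring the EQ doorbell: ack 'num_popped' processed event entries and
 * optionally re-arm the EQ and/or clear its interrupt.
 */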
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

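/* Ring the CQ doorbell: ack 'num_popped' completions and optionally re-arm */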
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

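/* ndo_set_mac_address() handler: programs the new primary MAC into a
 * pmac slot (deleting the old entry where applicable) and updates
 * netdev->dev_addr on success.
 */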
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user is passing the same MAC that was
         * configured for the VF by the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer, check if any MAC is active.
         * If active, get its pmac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

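/* Fold a 16-bit HW counter (which wraps at 65535) into a 32-bit SW
 * accumulator: the low 16 bits track the last HW reading and the high
 * 16 bits count the detected wrap-arounds.
 */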
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

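/* ndo_get_stats64() handler: sums the per-queue SW packet/byte counters
 * (sampled consistently via their u64-stats sync points) and folds in
 * the HW error counters parsed into adapter->drv_stats.
 */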
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

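/* Update the per-TX-queue SW counters under the u64-stats sync point */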
static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

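/* DMA-map the skb head and frags and fill one WRB per fragment after the
 * header WRB; on a mapping error, unmap whatever was posted so far and
 * return 0 so the caller can drop the skb.
 */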
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

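/* Software-insert the VLAN tag into the pkt data and clear the HW-offload
 * tag; used to work around the HW VLAN csum bug (see be_xmit()).
 */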
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

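/* ndo_start_xmit() handler: applies the HW workarounds below, posts the
 * WRBs and rings the TX doorbell; stops the queue when it is nearly full.
 */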
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which treats padding bytes as legal payload and
         * modifies the IPv4 hdr's 'tot_len' field; trim the padding off
         * such short pkts
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate the CSUM for VLAN
         * pkts even when CSUM offload is disabled.
         * As a workaround, insert the VLAN tag in the pkt manually.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

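/* ndo_set_rx_mode() handler: programs the promiscuous, multicast and
 * unicast filters, falling back to a promiscuous mode when the HW filter
 * tables run out of slots.
 */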
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

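/* ndo_set_vf_mac() handler: replaces the VF's currently programmed MAC
 * with the one requested by the host administrator.
 */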
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent VLAN Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

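/* Walk the PCI bus to find this PF's VFs; returns the number of VFs
 * assigned to a guest when vf_state == ASSIGNED, else the total VF count.
 */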
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

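/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the observed RX pkts/sec rate and program it if it changed.
 */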
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

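/* Return the page_info for the RX frag at frag_idx, unmapping the
 * underlying page when its last user is done with it.
 */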
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        napi_gro_frags(napi);
}

1448 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1449                                  struct be_rx_compl_info *rxcp)
1450 {
1451         rxcp->pkt_size =
1452                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1453         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1454         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1455         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1456         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1457         rxcp->ip_csum =
1458                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1459         rxcp->l4_csum =
1460                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1461         rxcp->ipv6 =
1462                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1463         rxcp->rxq_idx =
1464                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1465         rxcp->num_rcvd =
1466                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1467         rxcp->pkt_type =
1468                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1469         rxcp->rss_hash =
1470                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1471         if (rxcp->vlanf) {
1472                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1473                                           compl);
1474                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1475                                                compl);
1476         }
1477         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1478 }
1479
1480 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1481                                  struct be_rx_compl_info *rxcp)
1482 {
1483         rxcp->pkt_size =
1484                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1485         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1486         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1487         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1488         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1489         rxcp->ip_csum =
1490                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1491         rxcp->l4_csum =
1492                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1493         rxcp->ipv6 =
1494                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1495         rxcp->rxq_idx =
1496                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1497         rxcp->num_rcvd =
1498                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1499         rxcp->pkt_type =
1500                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1501         rxcp->rss_hash =
1502                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1503         if (rxcp->vlanf) {
1504                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1505                                           compl);
1506                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1507                                                compl);
1508         }
1509         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1510 }
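
     /* The two parsers above differ only in the descriptor layout they
      * decode: AMAP_GET_BITS() extracts a named bitfield from the
      * completion entry, so both the v0 (legacy) and v1 (BE3 native)
      * formats are normalized into the chip-independent
      * struct be_rx_compl_info.
      */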
1511
1512 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1513 {
1514         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1515         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1516         struct be_adapter *adapter = rxo->adapter;
1517
1518         /* For checking the valid bit it is OK to use either definition as the
1519          * valid bit is at the same position in both v0 and v1 Rx compl */
1520         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1521                 return NULL;
1522
1523         rmb();
1524         be_dws_le_to_cpu(compl, sizeof(*compl));
1525
1526         if (adapter->be3_native)
1527                 be_parse_rx_compl_v1(compl, rxcp);
1528         else
1529                 be_parse_rx_compl_v0(compl, rxcp);
1530
1531         if (rxcp->vlanf) {
1532                 /* vlanf could be wrongly set in some cards.
1533                  * Ignore it if vtm is not set */
1534                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1535                         rxcp->vlanf = 0;
1536
1537                 if (!lancer_chip(adapter))
1538                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1539
1540                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1541                     !adapter->vlan_tag[rxcp->vlan_tag])
1542                         rxcp->vlanf = 0;
1543         }
1544
1545         /* As the compl has been parsed, reset it; we won't touch it again */
1546         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1547
1548         queue_tail_inc(&rxo->cq);
1549         return rxcp;
1550 }
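
     /* Typical consumer of the routine above (a sketch; be_rx_cq_clean()
      * and be_process_rx() below are the real callers):
      *
      *      while ((rxcp = be_rx_compl_get(rxo)) != NULL)
      *              ... process rxcp ...
      *
      * The valid bit doubles as the producer/consumer handshake: HW sets
      * it when posting an entry and the driver clears it after parsing,
      * so a zero valid bit means the CQ is empty.
      */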
1551
1552 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1553 {
1554         u32 order = get_order(size);
1555
1556         if (order > 0)
1557                 gfp |= __GFP_COMP;
1558         return alloc_pages(gfp, order);
1559 }
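
     /* Note: __GFP_COMP makes order > 0 allocations compound pages, so the
      * per-fragment get_page()/put_page() calls (see be_post_rx_frags()
      * below) reference-count the whole allocation as a single unit.
      */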
1560
1561 /*
1562  * Allocate a page, split it into fragments of size rx_frag_size and post
1563  * them as receive buffers to BE.
1564  */
1565 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1566 {
1567         struct be_adapter *adapter = rxo->adapter;
1568         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1569         struct be_queue_info *rxq = &rxo->q;
1570         struct page *pagep = NULL;
1571         struct be_eth_rx_d *rxd;
1572         u64 page_dmaaddr = 0, frag_dmaaddr;
1573         u32 posted, page_offset = 0;
1574
1575         page_info = &rxo->page_info_tbl[rxq->head];
1576         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1577                 if (!pagep) {
1578                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1579                         if (unlikely(!pagep)) {
1580                                 rx_stats(rxo)->rx_post_fail++;
1581                                 break;
1582                         }
1583                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1584                                                     0, adapter->big_page_size,
1585                                                     DMA_FROM_DEVICE);
1586                         page_info->page_offset = 0;
1587                 } else {
1588                         get_page(pagep);
1589                         page_info->page_offset = page_offset + rx_frag_size;
1590                 }
1591                 page_offset = page_info->page_offset;
1592                 page_info->page = pagep;
1593                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1594                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1595
1596                 rxd = queue_head_node(rxq);
1597                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1598                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1599
1600                 /* Any space left in the current big page for another frag? */
1601                 if ((page_offset + rx_frag_size + rx_frag_size) >
1602                                         adapter->big_page_size) {
1603                         pagep = NULL;
1604                         page_info->last_page_user = true;
1605                 }
1606
1607                 prev_page_info = page_info;
1608                 queue_head_inc(rxq);
1609                 page_info = &rxo->page_info_tbl[rxq->head];
1610         }
1611         if (pagep)
1612                 prev_page_info->last_page_user = true;
1613
1614         if (posted) {
1615                 atomic_add(posted, &rxq->used);
1616                 be_rxq_notify(adapter, rxq->id, posted);
1617         } else if (atomic_read(&rxq->used) == 0) {
1618                 /* Let be_worker replenish when memory is available */
1619                 rxo->rx_post_starved = true;
1620         }
1621 }
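
     /* Worked example for the loop above (a sketch, assuming
      * rx_frag_size == 2048 and big_page_size == 4096): frag 0 maps a
      * fresh page and is posted at offset 0; frag 1 takes an extra
      * reference via get_page() and is posted at offset 2048; the space
      * check (2048 + 2 * 2048 > 4096) then flags last_page_user, so the
      * next frag starts over with a fresh page.
      */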
1622
1623 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1624 {
1625         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1626
1627         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1628                 return NULL;
1629
1630         rmb();
1631         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1632
1633         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1634
1635         queue_tail_inc(tx_cq);
1636         return txcp;
1637 }
1638
1639 static u16 be_tx_compl_process(struct be_adapter *adapter,
1640                 struct be_tx_obj *txo, u16 last_index)
1641 {
1642         struct be_queue_info *txq = &txo->q;
1643         struct be_eth_wrb *wrb;
1644         struct sk_buff **sent_skbs = txo->sent_skb_list;
1645         struct sk_buff *sent_skb;
1646         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1647         bool unmap_skb_hdr = true;
1648
1649         sent_skb = sent_skbs[txq->tail];
1650         BUG_ON(!sent_skb);
1651         sent_skbs[txq->tail] = NULL;
1652
1653         /* skip header wrb */
1654         queue_tail_inc(txq);
1655
1656         do {
1657                 cur_index = txq->tail;
1658                 wrb = queue_tail_node(txq);
1659                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1660                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1661                 unmap_skb_hdr = false;
1662
1663                 num_wrbs++;
1664                 queue_tail_inc(txq);
1665         } while (cur_index != last_index);
1666
1667         kfree_skb(sent_skb);
1668         return num_wrbs;
1669 }
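
     /* Accounting sketch for the routine above: an skb with a linear
      * header and two page frags occupies one hdr wrb plus three data
      * wrbs, so the tail advances four slots and num_wrbs == 4 is
      * returned for the caller to subtract from txq->used.
      */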
1670
1671 /* Consume and return the number of events pending in the event queue */
1672 static inline int events_get(struct be_eq_obj *eqo)
1673 {
1674         struct be_eq_entry *eqe;
1675         int num = 0;
1676
1677         do {
1678                 eqe = queue_tail_node(&eqo->q);
1679                 if (eqe->evt == 0)
1680                         break;
1681
1682                 rmb();
1683                 eqe->evt = 0;
1684                 num++;
1685                 queue_tail_inc(&eqo->q);
1686         } while (true);
1687
1688         return num;
1689 }
1690
1691 /* Leaves the EQ in a disarmed state */
1692 static void be_eq_clean(struct be_eq_obj *eqo)
1693 {
1694         int num = events_get(eqo);
1695
1696         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1697 }
1698
1699 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1700 {
1701         struct be_rx_page_info *page_info;
1702         struct be_queue_info *rxq = &rxo->q;
1703         struct be_queue_info *rx_cq = &rxo->cq;
1704         struct be_rx_compl_info *rxcp;
1705         struct be_adapter *adapter = rxo->adapter;
1706         int flush_wait = 0;
1707         u16 tail;
1708
1709         /* Consume pending rx completions.
1710          * Wait for the flush completion (identified by zero num_rcvd)
1711          * to arrive. Notify CQ even when there are no more CQ entries
1712          * for HW to flush partially coalesced CQ entries.
1713          * In Lancer, there is no need to wait for flush compl.
1714          */
1715         for (;;) {
1716                 rxcp = be_rx_compl_get(rxo);
1717                 if (rxcp == NULL) {
1718                         if (lancer_chip(adapter))
1719                                 break;
1720
1721                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1722                                 dev_warn(&adapter->pdev->dev,
1723                                          "did not receive flush compl\n");
1724                                 break;
1725                         }
1726                         be_cq_notify(adapter, rx_cq->id, true, 0);
1727                         mdelay(1);
1728                 } else {
1729                         be_rx_compl_discard(rxo, rxcp);
1730                         be_cq_notify(adapter, rx_cq->id, true, 1);
1731                         if (rxcp->num_rcvd == 0)
1732                                 break;
1733                 }
1734         }
1735
1736         /* After cleanup, leave the CQ in unarmed state */
1737         be_cq_notify(adapter, rx_cq->id, false, 0);
1738
1739         /* Then free posted rx buffers that were not used */
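             /* (e.g. head == 10, used == 3, len == 1024 gives tail == 7,
              *  the oldest buffer still posted)
              */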
1740         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1741         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1742                 page_info = get_rx_page_info(rxo, tail);
1743                 put_page(page_info->page);
1744                 memset(page_info, 0, sizeof(*page_info));
1745         }
1746         BUG_ON(atomic_read(&rxq->used));
1747         rxq->tail = rxq->head = 0;
1748 }
1749
1750 static void be_tx_compl_clean(struct be_adapter *adapter)
1751 {
1752         struct be_tx_obj *txo;
1753         struct be_queue_info *txq;
1754         struct be_eth_tx_compl *txcp;
1755         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1756         struct sk_buff *sent_skb;
1757         bool dummy_wrb;
1758         int i, pending_txqs;
1759
1760         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1761         do {
1762                 pending_txqs = adapter->num_tx_qs;
1763
1764                 for_all_tx_queues(adapter, txo, i) {
1765                         txq = &txo->q;
1766                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1767                                 end_idx =
1768                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1769                                                       wrb_index, txcp);
1770                                 num_wrbs += be_tx_compl_process(adapter, txo,
1771                                                                 end_idx);
1772                                 cmpl++;
1773                         }
1774                         if (cmpl) {
1775                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1776                                 atomic_sub(num_wrbs, &txq->used);
1777                                 cmpl = 0;
1778                                 num_wrbs = 0;
1779                         }
1780                         if (atomic_read(&txq->used) == 0)
1781                                 pending_txqs--;
1782                 }
1783
1784                 if (pending_txqs == 0 || ++timeo > 200)
1785                         break;
1786
1787                 mdelay(1);
1788         } while (true);
1789
1790         for_all_tx_queues(adapter, txo, i) {
1791                 txq = &txo->q;
1792                 if (atomic_read(&txq->used))
1793                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1794                                 atomic_read(&txq->used));
1795
1796                 /* free posted tx for which compls will never arrive */
1797                 while (atomic_read(&txq->used)) {
1798                         sent_skb = txo->sent_skb_list[txq->tail];
1799                         end_idx = txq->tail;
1800                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1801                                                    &dummy_wrb);
1802                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1803                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1804                         atomic_sub(num_wrbs, &txq->used);
1805                 }
1806         }
1807 }
1808
1809 static void be_evt_queues_destroy(struct be_adapter *adapter)
1810 {
1811         struct be_eq_obj *eqo;
1812         int i;
1813
1814         for_all_evt_queues(adapter, eqo, i) {
1815                 if (eqo->q.created) {
1816                         be_eq_clean(eqo);
1817                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1818                 }
1819                 be_queue_free(adapter, &eqo->q);
1820         }
1821 }
1822
1823 static int be_evt_queues_create(struct be_adapter *adapter)
1824 {
1825         struct be_queue_info *eq;
1826         struct be_eq_obj *eqo;
1827         int i, rc;
1828
1829         adapter->num_evt_qs = num_irqs(adapter);
1830
1831         for_all_evt_queues(adapter, eqo, i) {
1832                 eqo->adapter = adapter;
1833                 eqo->tx_budget = BE_TX_BUDGET;
1834                 eqo->idx = i;
1835                 eqo->max_eqd = BE_MAX_EQD;
1836                 eqo->enable_aic = true;
1837
1838                 eq = &eqo->q;
1839                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1840                                         sizeof(struct be_eq_entry));
1841                 if (rc)
1842                         return rc;
1843
1844                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1845                 if (rc)
1846                         return rc;
1847         }
1848         return 0;
1849 }
1850
1851 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1852 {
1853         struct be_queue_info *q;
1854
1855         q = &adapter->mcc_obj.q;
1856         if (q->created)
1857                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1858         be_queue_free(adapter, q);
1859
1860         q = &adapter->mcc_obj.cq;
1861         if (q->created)
1862                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1863         be_queue_free(adapter, q);
1864 }
1865
1866 /* Must be called only after TX qs are created as MCC shares TX EQ */
1867 static int be_mcc_queues_create(struct be_adapter *adapter)
1868 {
1869         struct be_queue_info *q, *cq;
1870
1871         cq = &adapter->mcc_obj.cq;
1872         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1873                         sizeof(struct be_mcc_compl)))
1874                 goto err;
1875
1876         /* Use the default EQ for MCC completions */
1877         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1878                 goto mcc_cq_free;
1879
1880         q = &adapter->mcc_obj.q;
1881         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1882                 goto mcc_cq_destroy;
1883
1884         if (be_cmd_mccq_create(adapter, q, cq))
1885                 goto mcc_q_free;
1886
1887         return 0;
1888
1889 mcc_q_free:
1890         be_queue_free(adapter, q);
1891 mcc_cq_destroy:
1892         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1893 mcc_cq_free:
1894         be_queue_free(adapter, cq);
1895 err:
1896         return -1;
1897 }
1898
1899 static void be_tx_queues_destroy(struct be_adapter *adapter)
1900 {
1901         struct be_queue_info *q;
1902         struct be_tx_obj *txo;
1903         u8 i;
1904
1905         for_all_tx_queues(adapter, txo, i) {
1906                 q = &txo->q;
1907                 if (q->created)
1908                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1909                 be_queue_free(adapter, q);
1910
1911                 q = &txo->cq;
1912                 if (q->created)
1913                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914                 be_queue_free(adapter, q);
1915         }
1916 }
1917
1918 static int be_num_txqs_want(struct be_adapter *adapter)
1919 {
1920         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1921             be_is_mc(adapter) ||
1922             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1923             BE2_chip(adapter))
1924                 return 1;
1925         else
1926                 return adapter->max_tx_queues;
1927 }
1928
1929 static int be_tx_cqs_create(struct be_adapter *adapter)
1930 {
1931         struct be_queue_info *cq, *eq;
1932         int status;
1933         struct be_tx_obj *txo;
1934         u8 i;
1935
1936         adapter->num_tx_qs = be_num_txqs_want(adapter);
1937         if (adapter->num_tx_qs != MAX_TX_QS) {
1938                 rtnl_lock();
1939                 netif_set_real_num_tx_queues(adapter->netdev,
1940                         adapter->num_tx_qs);
1941                 rtnl_unlock();
1942         }
1943
1944         for_all_tx_queues(adapter, txo, i) {
1945                 cq = &txo->cq;
1946                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1947                                         sizeof(struct be_eth_tx_compl));
1948                 if (status)
1949                         return status;
1950
1951                 /* If num_evt_qs is less than num_tx_qs, then more than
1952                  * one txq shares an eq
1953                  */
1954                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1955                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1956                 if (status)
1957                         return status;
1958         }
1959         return 0;
1960 }
1961
1962 static int be_tx_qs_create(struct be_adapter *adapter)
1963 {
1964         struct be_tx_obj *txo;
1965         int i, status;
1966
1967         for_all_tx_queues(adapter, txo, i) {
1968                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1969                                         sizeof(struct be_eth_wrb));
1970                 if (status)
1971                         return status;
1972
1973                 status = be_cmd_txq_create(adapter, txo);
1974                 if (status)
1975                         return status;
1976         }
1977
1978         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1979                  adapter->num_tx_qs);
1980         return 0;
1981 }
1982
1983 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1984 {
1985         struct be_queue_info *q;
1986         struct be_rx_obj *rxo;
1987         int i;
1988
1989         for_all_rx_queues(adapter, rxo, i) {
1990                 q = &rxo->cq;
1991                 if (q->created)
1992                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1993                 be_queue_free(adapter, q);
1994         }
1995 }
1996
1997 static int be_rx_cqs_create(struct be_adapter *adapter)
1998 {
1999         struct be_queue_info *eq, *cq;
2000         struct be_rx_obj *rxo;
2001         int rc, i;
2002
2003         /* Create as many RSS rings as there are irqs, plus one default
2004          * non-RSS ring. With a single irq, only the default ring is created.
2005          */
2006         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2007                                 num_irqs(adapter) + 1 : 1;
2008         if (adapter->num_rx_qs != MAX_RX_QS) {
2009                 rtnl_lock();
2010                 netif_set_real_num_rx_queues(adapter->netdev,
2011                                              adapter->num_rx_qs);
2012                 rtnl_unlock();
2013         }
2014
2015         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2016         for_all_rx_queues(adapter, rxo, i) {
2017                 rxo->adapter = adapter;
2018                 cq = &rxo->cq;
2019                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2020                                 sizeof(struct be_eth_rx_compl));
2021                 if (rc)
2022                         return rc;
2023
2024                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2025                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2026                 if (rc)
2027                         return rc;
2028         }
2029
2030         dev_info(&adapter->pdev->dev,
2031                  "created %d RSS queue(s) and 1 default RX queue\n",
2032                  adapter->num_rx_qs - 1);
2033         return 0;
2034 }
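
     /* Mapping sketch for the i % num_evt_qs scheme above: with 4 EQs and
      * num_rx_qs == 5 (4 RSS rings plus the default ring), rings 0..3
      * pair with EQ0..EQ3 and ring 4 (the default) wraps back onto EQ0,
      * which is why be_poll() walks EQ0's rings twice.
      */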
2035
2036 static irqreturn_t be_intx(int irq, void *dev)
2037 {
2038         struct be_eq_obj *eqo = dev;
2039         struct be_adapter *adapter = eqo->adapter;
2040         int num_evts = 0;
2041
2042         /* IRQ is not expected when NAPI is scheduled as the EQ
2043          * will not be armed.
2044          * But, this can happen on Lancer INTx where it takes
2045          * a while to de-assert INTx or in BE2 where occasionally
2046          * an interrupt may be raised even when EQ is unarmed.
2047          * If NAPI is already scheduled, then counting & notifying
2048          * events will orphan them.
2049          */
2050         if (napi_schedule_prep(&eqo->napi)) {
2051                 num_evts = events_get(eqo);
2052                 __napi_schedule(&eqo->napi);
2053                 if (num_evts)
2054                         eqo->spurious_intr = 0;
2055         }
2056         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2057
2058         /* Return IRQ_HANDLED only for the first spurious intr
2059          * after a valid intr to stop the kernel from branding
2060          * this irq as a bad one!
2061          */
2062         if (num_evts || eqo->spurious_intr++ == 0)
2063                 return IRQ_HANDLED;
2064         else
2065                 return IRQ_NONE;
2066 }
2067
2068 static irqreturn_t be_msix(int irq, void *dev)
2069 {
2070         struct be_eq_obj *eqo = dev;
2071
2072         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2073         napi_schedule(&eqo->napi);
2074         return IRQ_HANDLED;
2075 }
2076
2077 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2078 {
2079         return rxcp->tcpf && !rxcp->err;
2080 }
2081
2082 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2083                         int budget)
2084 {
2085         struct be_adapter *adapter = rxo->adapter;
2086         struct be_queue_info *rx_cq = &rxo->cq;
2087         struct be_rx_compl_info *rxcp;
2088         u32 work_done;
2089
2090         for (work_done = 0; work_done < budget; work_done++) {
2091                 rxcp = be_rx_compl_get(rxo);
2092                 if (!rxcp)
2093                         break;
2094
2095                 /* Is it a flush compl that has no data */
2096                 if (unlikely(rxcp->num_rcvd == 0))
2097                         goto loop_continue;
2098
2099                 /* Discard compls with partial DMA (Lancer B0) */
2100                 if (unlikely(!rxcp->pkt_size)) {
2101                         be_rx_compl_discard(rxo, rxcp);
2102                         goto loop_continue;
2103                 }
2104
2105                 /* On BE drop pkts that arrive due to imperfect filtering in
2106                  * promiscuous mode on some SKUs
2107                  */
2108                 if (unlikely(rxcp->port != adapter->port_num &&
2109                                 !lancer_chip(adapter))) {
2110                         be_rx_compl_discard(rxo, rxcp);
2111                         goto loop_continue;
2112                 }
2113
2114                 if (do_gro(rxcp))
2115                         be_rx_compl_process_gro(rxo, napi, rxcp);
2116                 else
2117                         be_rx_compl_process(rxo, rxcp);
2118 loop_continue:
2119                 be_rx_stats_update(rxo, rxcp);
2120         }
2121
2122         if (work_done) {
2123                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2124
2125                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2126                         be_post_rx_frags(rxo, GFP_ATOMIC);
2127         }
2128
2129         return work_done;
2130 }
2131
2132 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2133                           int budget, int idx)
2134 {
2135         struct be_eth_tx_compl *txcp;
2136         int num_wrbs = 0, work_done;
2137
2138         for (work_done = 0; work_done < budget; work_done++) {
2139                 txcp = be_tx_compl_get(&txo->cq);
2140                 if (!txcp)
2141                         break;
2142                 num_wrbs += be_tx_compl_process(adapter, txo,
2143                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2144                                         wrb_index, txcp));
2145         }
2146
2147         if (work_done) {
2148                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2149                 atomic_sub(num_wrbs, &txo->q.used);
2150
2151                 /* As Tx wrbs have been freed up, wake up netdev queue
2152                  * if it was stopped due to lack of tx wrbs.  */
2153                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2154                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2155                         netif_wake_subqueue(adapter->netdev, idx);
2156                 }
2157
2158                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2159                 tx_stats(txo)->tx_compl += work_done;
2160                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2161         }
2162         return (work_done < budget); /* Done */
2163 }
2164
2165 int be_poll(struct napi_struct *napi, int budget)
2166 {
2167         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2168         struct be_adapter *adapter = eqo->adapter;
2169         int max_work = 0, work, i, num_evts;
2170         bool tx_done;
2171
2172         num_evts = events_get(eqo);
2173
2174         /* Process all TXQs serviced by this EQ */
2175         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2176                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2177                                         eqo->tx_budget, i);
2178                 if (!tx_done)
2179                         max_work = budget;
2180         }
2181
2182         /* This loop iterates twice for EQ0, on which completions of the
2183          * last RXQ (the default one) are also processed. For other EQs
2184          * the loop iterates only once.
2185          */
2186         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2187                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2188                 max_work = max(work, max_work);
2189         }
2190
2191         if (is_mcc_eqo(eqo))
2192                 be_process_mcc(adapter);
2193
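             /* NAPI contract: returning less than the budget tells the core
              * this poll is done; only then is it safe to re-arm the EQ.
              */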
2194         if (max_work < budget) {
2195                 napi_complete(napi);
2196                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2197         } else {
2198                 /* As we'll continue in polling mode, count and clear events */
2199                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2200         }
2201         return max_work;
2202 }
2203
2204 void be_detect_error(struct be_adapter *adapter)
2205 {
2206         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2207         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2208         u32 i;
2209
2210         if (be_hw_error(adapter))
2211                 return;
2212
2213         if (lancer_chip(adapter)) {
2214                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2215                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2216                         sliport_err1 = ioread32(adapter->db +
2217                                         SLIPORT_ERROR1_OFFSET);
2218                         sliport_err2 = ioread32(adapter->db +
2219                                         SLIPORT_ERROR2_OFFSET);
2220                 }
2221         } else {
2222                 pci_read_config_dword(adapter->pdev,
2223                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2224                 pci_read_config_dword(adapter->pdev,
2225                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2226                 pci_read_config_dword(adapter->pdev,
2227                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2228                 pci_read_config_dword(adapter->pdev,
2229                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2230
2231                 ue_lo = (ue_lo & ~ue_lo_mask);
2232                 ue_hi = (ue_hi & ~ue_hi_mask);
2233         }
2234
2235         /* On certain platforms BE hardware can indicate spurious UEs.
2236          * In case of a real UE the h/w is allowed to stop working
2237          * completely on its own; hence hw_error is not set on UE detection.
2238          */
2239         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2240                 adapter->hw_error = true;
2241                 dev_err(&adapter->pdev->dev,
2242                         "Error detected in the card\n");
2243         }
2244
2245         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2246                 dev_err(&adapter->pdev->dev,
2247                         "ERR: sliport status 0x%x\n", sliport_status);
2248                 dev_err(&adapter->pdev->dev,
2249                         "ERR: sliport error1 0x%x\n", sliport_err1);
2250                 dev_err(&adapter->pdev->dev,
2251                         "ERR: sliport error2 0x%x\n", sliport_err2);
2252         }
2253
2254         if (ue_lo) {
2255                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2256                         if (ue_lo & 1)
2257                                 dev_err(&adapter->pdev->dev,
2258                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2259                 }
2260         }
2261
2262         if (ue_hi) {
2263                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2264                         if (ue_hi & 1)
2265                                 dev_err(&adapter->pdev->dev,
2266                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2267                 }
2268         }
2269
2270 }
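
     /* Decoding sketch for the loops above: the UE registers are walked
      * LSB-first, so a set bit n in ue_lo/ue_hi logs one line naming
      * ue_status_low_desc[n]/ue_status_hi_desc[n] respectively.
      */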
2271
2272 static void be_msix_disable(struct be_adapter *adapter)
2273 {
2274         if (msix_enabled(adapter)) {
2275                 pci_disable_msix(adapter->pdev);
2276                 adapter->num_msix_vec = 0;
2277         }
2278 }
2279
2280 static uint be_num_rss_want(struct be_adapter *adapter)
2281 {
2282         u32 num = 0;
2283
2284         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2285             (lancer_chip(adapter) ||
2286              (!sriov_want(adapter) && be_physfn(adapter)))) {
2287                 num = adapter->max_rss_queues;
2288                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2289         }
2290         return num;
2291 }
2292
2293 static void be_msix_enable(struct be_adapter *adapter)
2294 {
2295 #define BE_MIN_MSIX_VECTORS             1
2296         int i, status, num_vec, num_roce_vec = 0;
2297         struct device *dev = &adapter->pdev->dev;
2298
2299         /* If RSS queues are not used, we need a vec for the default RX Q */
2300         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2301         if (be_roce_supported(adapter)) {
2302                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2303                                         (num_online_cpus() + 1));
2304                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2305                 num_vec += num_roce_vec;
2306                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2307         }
2308         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2309
2310         for (i = 0; i < num_vec; i++)
2311                 adapter->msix_entries[i].entry = i;
2312
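             /* pci_enable_msix() returns 0 on success, a negative errno on
              * failure, or (when fewer vectors than requested are free) a
              * positive count that the code below retries with.
              */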
2313         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2314         if (status == 0) {
2315                 goto done;
2316         } else if (status >= BE_MIN_MSIX_VECTORS) {
2317                 num_vec = status;
2318                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2319                                 num_vec) == 0)
2320                         goto done;
2321         }
2322
2323         dev_warn(dev, "MSIx enable failed\n");
2324         return;
2325 done:
2326         if (be_roce_supported(adapter)) {
2327                 if (num_vec > num_roce_vec) {
2328                         adapter->num_msix_vec = num_vec - num_roce_vec;
2329                         adapter->num_msix_roce_vec =
2330                                 num_vec - adapter->num_msix_vec;
2331                 } else {
2332                         adapter->num_msix_vec = num_vec;
2333                         adapter->num_msix_roce_vec = 0;
2334                 }
2335         } else
2336                 adapter->num_msix_vec = num_vec;
2337         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2338         return;
2339 }
2340
2341 static inline int be_msix_vec_get(struct be_adapter *adapter,
2342                                 struct be_eq_obj *eqo)
2343 {
2344         return adapter->msix_entries[eqo->idx].vector;
2345 }
2346
2347 static int be_msix_register(struct be_adapter *adapter)
2348 {
2349         struct net_device *netdev = adapter->netdev;
2350         struct be_eq_obj *eqo;
2351         int status, i, vec;
2352
2353         for_all_evt_queues(adapter, eqo, i) {
2354                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2355                 vec = be_msix_vec_get(adapter, eqo);
2356                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2357                 if (status)
2358                         goto err_msix;
2359         }
2360
2361         return 0;
2362 err_msix:
2363         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2364                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2365         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2366                 status);
2367         be_msix_disable(adapter);
2368         return status;
2369 }
2370
2371 static int be_irq_register(struct be_adapter *adapter)
2372 {
2373         struct net_device *netdev = adapter->netdev;
2374         int status;
2375
2376         if (msix_enabled(adapter)) {
2377                 status = be_msix_register(adapter);
2378                 if (status == 0)
2379                         goto done;
2380                 /* INTx is not supported for VF */
2381                 if (!be_physfn(adapter))
2382                         return status;
2383         }
2384
2385         /* INTx: only the first EQ is used */
2386         netdev->irq = adapter->pdev->irq;
2387         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2388                              &adapter->eq_obj[0]);
2389         if (status) {
2390                 dev_err(&adapter->pdev->dev,
2391                         "INTx request IRQ failed - err %d\n", status);
2392                 return status;
2393         }
2394 done:
2395         adapter->isr_registered = true;
2396         return 0;
2397 }
2398
2399 static void be_irq_unregister(struct be_adapter *adapter)
2400 {
2401         struct net_device *netdev = adapter->netdev;
2402         struct be_eq_obj *eqo;
2403         int i;
2404
2405         if (!adapter->isr_registered)
2406                 return;
2407
2408         /* INTx */
2409         if (!msix_enabled(adapter)) {
2410                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2411                 goto done;
2412         }
2413
2414         /* MSIx */
2415         for_all_evt_queues(adapter, eqo, i)
2416                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2417
2418 done:
2419         adapter->isr_registered = false;
2420 }
2421
2422 static void be_rx_qs_destroy(struct be_adapter *adapter)
2423 {
2424         struct be_queue_info *q;
2425         struct be_rx_obj *rxo;
2426         int i;
2427
2428         for_all_rx_queues(adapter, rxo, i) {
2429                 q = &rxo->q;
2430                 if (q->created) {
2431                         be_cmd_rxq_destroy(adapter, q);
2432                         /* After the rxq is invalidated, wait for a grace time
2433                          * of 1ms for all dma to end and the flush compl to
2434                          * arrive
2435                          */
2436                         mdelay(1);
2437                         be_rx_cq_clean(rxo);
2438                 }
2439                 be_queue_free(adapter, q);
2440         }
2441 }
2442
2443 static int be_close(struct net_device *netdev)
2444 {
2445         struct be_adapter *adapter = netdev_priv(netdev);
2446         struct be_eq_obj *eqo;
2447         int i;
2448
2449         be_roce_dev_close(adapter);
2450
2451         for_all_evt_queues(adapter, eqo, i)
2452                 napi_disable(&eqo->napi);
2453
2454         be_async_mcc_disable(adapter);
2455
2456         /* Wait for all pending tx completions to arrive so that
2457          * all tx skbs are freed.
2458          */
2459         be_tx_compl_clean(adapter);
2460
2461         be_rx_qs_destroy(adapter);
2462
2463         for_all_evt_queues(adapter, eqo, i) {
2464                 if (msix_enabled(adapter))
2465                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2466                 else
2467                         synchronize_irq(netdev->irq);
2468                 be_eq_clean(eqo);
2469         }
2470
2471         be_irq_unregister(adapter);
2472
2473         return 0;
2474 }
2475
2476 static int be_rx_qs_create(struct be_adapter *adapter)
2477 {
2478         struct be_rx_obj *rxo;
2479         int rc, i, j;
2480         u8 rsstable[128];
2481
2482         for_all_rx_queues(adapter, rxo, i) {
2483                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2484                                     sizeof(struct be_eth_rx_d));
2485                 if (rc)
2486                         return rc;
2487         }
2488
2489         /* The FW would like the default RXQ to be created first */
2490         rxo = default_rxo(adapter);
2491         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2492                                adapter->if_handle, false, &rxo->rss_id);
2493         if (rc)
2494                 return rc;
2495
2496         for_all_rss_queues(adapter, rxo, i) {
2497                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2498                                        rx_frag_size, adapter->if_handle,
2499                                        true, &rxo->rss_id);
2500                 if (rc)
2501                         return rc;
2502         }
2503
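             /* Fill the 128-entry RSS indirection table round-robin with the
              * RSS rings' ids: with 4 RSS rings, entries 0,4,8,... get ring
              * 0's rss_id, entries 1,5,9,... ring 1's, and so on.
              */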
2504         if (be_multi_rxq(adapter)) {
2505                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2506                         for_all_rss_queues(adapter, rxo, i) {
2507                                 if ((j + i) >= 128)
2508                                         break;
2509                                 rsstable[j + i] = rxo->rss_id;
2510                         }
2511                 }
2512                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2513                 if (rc)
2514                         return rc;
2515         }
2516
2517         /* First time posting */
2518         for_all_rx_queues(adapter, rxo, i)
2519                 be_post_rx_frags(rxo, GFP_KERNEL);
2520         return 0;
2521 }
2522
2523 static int be_open(struct net_device *netdev)
2524 {
2525         struct be_adapter *adapter = netdev_priv(netdev);
2526         struct be_eq_obj *eqo;
2527         struct be_rx_obj *rxo;
2528         struct be_tx_obj *txo;
2529         u8 link_status;
2530         int status, i;
2531
2532         status = be_rx_qs_create(adapter);
2533         if (status)
2534                 goto err;
2535
2536         be_irq_register(adapter);
2537
2538         for_all_rx_queues(adapter, rxo, i)
2539                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2540
2541         for_all_tx_queues(adapter, txo, i)
2542                 be_cq_notify(adapter, txo->cq.id, true, 0);
2543
2544         be_async_mcc_enable(adapter);
2545
2546         for_all_evt_queues(adapter, eqo, i) {
2547                 napi_enable(&eqo->napi);
2548                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2549         }
2550
2551         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2552         if (!status)
2553                 be_link_status_update(adapter, link_status);
2554
2555         be_roce_dev_open(adapter);
2556         return 0;
2557 err:
2558         be_close(adapter->netdev);
2559         return -EIO;
2560 }
2561
2562 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2563 {
2564         struct be_dma_mem cmd;
2565         int status = 0;
2566         u8 mac[ETH_ALEN];
2567
2568         memset(mac, 0, ETH_ALEN);
2569
2570         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2571         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2572                                     GFP_KERNEL | __GFP_ZERO);
2573         if (cmd.va == NULL)
2574                 return -1;
2575
2576         if (enable) {
2577                 status = pci_write_config_dword(adapter->pdev,
2578                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2579                 if (status) {
2580                         dev_err(&adapter->pdev->dev,
2581                                 "Could not enable Wake-on-LAN\n");
2582                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2583                                           cmd.dma);
2584                         return status;
2585                 }
2586                 status = be_cmd_enable_magic_wol(adapter,
2587                                 adapter->netdev->dev_addr, &cmd);
2588                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2589                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2590         } else {
2591                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2592                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2593                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2594         }
2595
2596         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2597         return status;
2598 }
2599
2600 /*
2601  * Generate a seed MAC address from the PF MAC address using jhash.
2602  * MAC addresses for VFs are assigned incrementally starting from the seed.
2603  * These addresses are programmed in the ASIC by the PF and the VF driver
2604  * queries for the MAC address during its probe.
2605  */
2606 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2607 {
2608         u32 vf;
2609         int status = 0;
2610         u8 mac[ETH_ALEN];
2611         struct be_vf_cfg *vf_cfg;
2612
2613         be_vf_eth_addr_generate(adapter, mac);
2614
2615         for_all_vfs(adapter, vf_cfg, vf) {
2616                 if (lancer_chip(adapter)) {
2617                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2618                 } else {
2619                         status = be_cmd_pmac_add(adapter, mac,
2620                                                  vf_cfg->if_handle,
2621                                                  &vf_cfg->pmac_id, vf + 1);
2622                 }
2623
2624                 if (status)
2625                         dev_err(&adapter->pdev->dev,
2626                         "MAC address assignment failed for VF %d\n", vf);
2627                 else
2628                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2629
2630                 mac[5] += 1;
2631         }
2632         return status;
2633 }
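
     /* Sketch of the scheme above: if the generated seed were
      * 02:00:00:0a:0b:00 (an illustrative value), VF0 is assigned ...:00,
      * VF1 ...:01, and so on. Only mac[5] is bumped, so the scheme assumes
      * fewer than 256 VFs per PF.
      */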
2634
2635 static int be_vfs_mac_query(struct be_adapter *adapter)
2636 {
2637         int status, vf;
2638         u8 mac[ETH_ALEN];
2639         struct be_vf_cfg *vf_cfg;
2640         bool active;
2641
2642         for_all_vfs(adapter, vf_cfg, vf) {
2643                 be_cmd_get_mac_from_list(adapter, mac, &active,
2644                                          &vf_cfg->pmac_id, 0);
2645
2646                 status = be_cmd_mac_addr_query(adapter, mac, false,
2647                                                vf_cfg->if_handle, 0);
2648                 if (status)
2649                         return status;
2650                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2651         }
2652         return 0;
2653 }
2654
2655 static void be_vf_clear(struct be_adapter *adapter)
2656 {
2657         struct be_vf_cfg *vf_cfg;
2658         u32 vf;
2659
2660         if (be_find_vfs(adapter, ASSIGNED)) {
2661                 dev_warn(&adapter->pdev->dev,
2662                          "VFs are assigned to VMs: not disabling VFs\n");
2663                 goto done;
2664         }
2665
2666         for_all_vfs(adapter, vf_cfg, vf) {
2667                 if (lancer_chip(adapter))
2668                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2669                 else
2670                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2671                                         vf_cfg->pmac_id, vf + 1);
2672
2673                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2674         }
2675         pci_disable_sriov(adapter->pdev);
2676 done:
2677         kfree(adapter->vf_cfg);
2678         adapter->num_vfs = 0;
2679 }
2680
2681 static int be_clear(struct be_adapter *adapter)
2682 {
2683         int i = 1;
2684
2685         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2686                 cancel_delayed_work_sync(&adapter->work);
2687                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2688         }
2689
2690         if (sriov_enabled(adapter))
2691                 be_vf_clear(adapter);
2692
2693         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2694                 be_cmd_pmac_del(adapter, adapter->if_handle,
2695                         adapter->pmac_id[i], 0);
2696
2697         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2698
2699         be_mcc_queues_destroy(adapter);
2700         be_rx_cqs_destroy(adapter);
2701         be_tx_queues_destroy(adapter);
2702         be_evt_queues_destroy(adapter);
2703
2704         kfree(adapter->pmac_id);
2705         adapter->pmac_id = NULL;
2706
2707         be_msix_disable(adapter);
2708         return 0;
2709 }
2710
2711 static int be_vfs_if_create(struct be_adapter *adapter)
2712 {
2713         struct be_vf_cfg *vf_cfg;
2714         u32 cap_flags, en_flags, vf;
2715         int status = 0;
2716
2717         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2718                     BE_IF_FLAGS_MULTICAST;
2719
2720         for_all_vfs(adapter, vf_cfg, vf) {
2721                 if (!BE3_chip(adapter))
2722                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2723
2724                 /* If a FW profile exists, then cap_flags are updated */
2725                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2726                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2727                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2728                                           &vf_cfg->if_handle, vf + 1);
2729                 if (status)
2730                         goto err;
2731         }
2732 err:
2733         return status;
2734 }
2735
2736 static int be_vf_setup_init(struct be_adapter *adapter)
2737 {
2738         struct be_vf_cfg *vf_cfg;
2739         int vf;
2740
2741         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2742                                   GFP_KERNEL);
2743         if (!adapter->vf_cfg)
2744                 return -ENOMEM;
2745
2746         for_all_vfs(adapter, vf_cfg, vf) {
2747                 vf_cfg->if_handle = -1;
2748                 vf_cfg->pmac_id = -1;
2749         }
2750         return 0;
2751 }
2752
2753 static int be_vf_setup(struct be_adapter *adapter)
2754 {
2755         struct be_vf_cfg *vf_cfg;
2756         u16 def_vlan, lnk_speed;
2757         int status, old_vfs, vf;
2758         struct device *dev = &adapter->pdev->dev;
2759
2760         old_vfs = be_find_vfs(adapter, ENABLED);
2761         if (old_vfs) {
2762                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2763                 if (old_vfs != num_vfs)
2764                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2765                 adapter->num_vfs = old_vfs;
2766         } else {
2767                 if (num_vfs > adapter->dev_num_vfs)
2768                         dev_info(dev, "Device supports %d VFs and not %d\n",
2769                                  adapter->dev_num_vfs, num_vfs);
2770                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2771
2772                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2773                 if (status) {
2774                         dev_err(dev, "SRIOV enable failed\n");
2775                         adapter->num_vfs = 0;
2776                         return 0;
2777                 }
2778         }
2779
2780         status = be_vf_setup_init(adapter);
2781         if (status)
2782                 goto err;
2783
2784         if (old_vfs) {
2785                 for_all_vfs(adapter, vf_cfg, vf) {
2786                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2787                         if (status)
2788                                 goto err;
2789                 }
2790         } else {
2791                 status = be_vfs_if_create(adapter);
2792                 if (status)
2793                         goto err;
2794         }
2795
2796         if (old_vfs) {
2797                 status = be_vfs_mac_query(adapter);
2798                 if (status)
2799                         goto err;
2800         } else {
2801                 status = be_vf_eth_addr_config(adapter);
2802                 if (status)
2803                         goto err;
2804         }
2805
2806         for_all_vfs(adapter, vf_cfg, vf) {
2807                 /* BE3 FW, by default, caps the VF TX-rate at 100 Mbps.
2808                  * Allow the full available bandwidth.
2809                  */
2810                 if (BE3_chip(adapter) && !old_vfs)
2811                         be_cmd_set_qos(adapter, 1000, vf + 1);
2812
2813                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2814                                                   NULL, vf + 1);
2815                 if (!status)
2816                         vf_cfg->tx_rate = lnk_speed;
2817
2818                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2819                                                vf + 1, vf_cfg->if_handle);
2820                 if (status)
2821                         goto err;
2822                 vf_cfg->def_vid = def_vlan;
2823
2824                 be_cmd_enable_vf(adapter, vf + 1);
2825         }
2826         return 0;
2827 err:
2828         dev_err(dev, "VF setup failed\n");
2829         be_vf_clear(adapter);
2830         return status;
2831 }
2832
2833 static void be_setup_init(struct be_adapter *adapter)
2834 {
2835         adapter->vlan_prio_bmap = 0xff;
2836         adapter->phy.link_speed = -1;
2837         adapter->if_handle = -1;
2838         adapter->be3_native = false;
2839         adapter->promiscuous = false;
2840         if (be_physfn(adapter))
2841                 adapter->cmd_privileges = MAX_PRIVILEGES;
2842         else
2843                 adapter->cmd_privileges = MIN_PRIVILEGES;
2844 }
2845
2846 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2847                            bool *active_mac, u32 *pmac_id)
2848 {
2849         int status = 0;
2850
2851         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2852                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2853                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2854                         *active_mac = true;
2855                 else
2856                         *active_mac = false;
2857
2858                 return status;
2859         }
2860
2861         if (lancer_chip(adapter)) {
2862                 status = be_cmd_get_mac_from_list(adapter, mac,
2863                                                   active_mac, pmac_id, 0);
2864                 if (*active_mac) {
2865                         status = be_cmd_mac_addr_query(adapter, mac, false,
2866                                                        if_handle, *pmac_id);
2867                 }
2868         } else if (be_physfn(adapter)) {
2869                 /* For BE3, for PF get permanent MAC */
2870                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2871                 *active_mac = false;
2872         } else {
2873                 /* For BE3, for VF get soft MAC assigned by PF*/
2874                 status = be_cmd_mac_addr_query(adapter, mac, false,
2875                                                if_handle, 0);
2876                 *active_mac = true;
2877         }
2878         return status;
2879 }
2880
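/* Discover per-function resource limits: newer (non-BEx) chips report them
 * via a FW profile (be_cmd_get_func_config()); BE2/BE3 fall back to driver
 * defaults. Also reads the SR-IOV TotalVFs value from PCI config space.
 */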
2881 static void be_get_resources(struct be_adapter *adapter)
2882 {
2883         u16 dev_num_vfs;
2884         int pos, status;
2885         bool profile_present = false;
2886
2887         if (!BEx_chip(adapter)) {
2888                 status = be_cmd_get_func_config(adapter);
2889                 if (!status)
2890                         profile_present = true;
2891         }
2892
2893         if (profile_present) {
2894                 /* Sanity fixes for Lancer */
2895                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2896                                               BE_UC_PMAC_COUNT);
2897                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2898                                            BE_NUM_VLANS_SUPPORTED);
2899                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2900                                                BE_MAX_MC);
2901                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2902                                                MAX_TX_QS);
2903                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2904                                                 BE3_MAX_RSS_QS);
2905                 adapter->max_event_queues = min_t(u16,
2906                                                   adapter->max_event_queues,
2907                                                   BE3_MAX_RSS_QS);
2908
2909                 if (adapter->max_rss_queues &&
2910                     adapter->max_rss_queues == adapter->max_rx_queues)
2911                         adapter->max_rss_queues -= 1;
2912
2913                 if (adapter->max_event_queues < adapter->max_rss_queues)
2914                         adapter->max_rss_queues = adapter->max_event_queues;
2915
2916         } else {
2917                 if (be_physfn(adapter))
2918                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2919                 else
2920                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2921
2922                 if (adapter->function_mode & FLEX10_MODE)
2923                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2924                 else
2925                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2926
2927                 adapter->max_mcast_mac = BE_MAX_MC;
2928                 adapter->max_tx_queues = MAX_TX_QS;
2929                 adapter->max_rss_queues = (adapter->be3_native) ?
2930                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2931                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2932
2933                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2934                                         BE_IF_FLAGS_BROADCAST |
2935                                         BE_IF_FLAGS_MULTICAST |
2936                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2937                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2938                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2939                                         BE_IF_FLAGS_PROMISCUOUS;
2940
2941                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2942                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2943         }
2944
2945         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2946         if (pos) {
2947                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2948                                      &dev_num_vfs);
2949                 if (BE3_chip(adapter))
2950                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2951                 adapter->dev_num_vfs = dev_num_vfs;
2952         }
2953 }
2954
2955 /* Routine to query per function resource limits */
2956 static int be_get_config(struct be_adapter *adapter)
2957 {
2958         int status;
2959
2960         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2961                                      &adapter->function_mode,
2962                                      &adapter->function_caps,
2963                                      &adapter->asic_rev);
2964         if (status)
2965                 goto err;
2966
2967         be_get_resources(adapter);
2968
2969         /* primary mac needs 1 pmac entry */
2970         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2971                                    sizeof(u32), GFP_KERNEL);
2972         if (!adapter->pmac_id) {
2973                 status = -ENOMEM;
2974                 goto err;
2975         }
2976
2977 err:
2978         return status;
2979 }
2980
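/* Bring-up sequence: query FW config/resources, create EQs/CQs/MCC queues,
 * create the interface and program its MAC, create TX queues, apply VLAN,
 * RX-mode and flow-control settings, and (on a PF) set up requested VFs.
 */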
2981 static int be_setup(struct be_adapter *adapter)
2982 {
2983         struct device *dev = &adapter->pdev->dev;
2984         u32 en_flags;
2985         u32 tx_fc, rx_fc;
2986         int status;
2987         u8 mac[ETH_ALEN];
2988         bool active_mac;
2989
2990         be_setup_init(adapter);
2991
2992         if (!lancer_chip(adapter))
2993                 be_cmd_req_native_mode(adapter);
2994
2995         status = be_get_config(adapter);
2996         if (status)
2997                 goto err;
2998
2999         be_msix_enable(adapter);
3000
3001         status = be_evt_queues_create(adapter);
3002         if (status)
3003                 goto err;
3004
3005         status = be_tx_cqs_create(adapter);
3006         if (status)
3007                 goto err;
3008
3009         status = be_rx_cqs_create(adapter);
3010         if (status)
3011                 goto err;
3012
3013         status = be_mcc_queues_create(adapter);
3014         if (status)
3015                 goto err;
3016
3017         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3018         /* In UMC mode FW does not return right privileges.
3019          * Override with correct privilege equivalent to PF.
3020          */
3021         if (be_is_mc(adapter))
3022                 adapter->cmd_privileges = MAX_PRIVILEGES;
3023
3024         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3025                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3026
3027         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3028                 en_flags |= BE_IF_FLAGS_RSS;
3029
3030         en_flags &= adapter->if_cap_flags;
3031
3032         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3033                                   &adapter->if_handle, 0);
3034         if (status != 0)
3035                 goto err;
3036
3037         memset(mac, 0, ETH_ALEN);
3038         active_mac = false;
3039         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3040                                  &active_mac, &adapter->pmac_id[0]);
3041         if (status != 0)
3042                 goto err;
3043
3044         if (!active_mac) {
3045                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3046                                          &adapter->pmac_id[0], 0);
3047                 if (status != 0)
3048                         goto err;
3049         }
3050
3051         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3052                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3053                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3054         }
3055
3056         status = be_tx_qs_create(adapter);
3057         if (status)
3058                 goto err;
3059
3060         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3061
3062         if (adapter->vlans_added)
3063                 be_vid_config(adapter);
3064
3065         be_set_rx_mode(adapter->netdev);
3066
3067         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3068
3069         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3070                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3071                                         adapter->rx_fc);
3072
3073         if (be_physfn(adapter) && num_vfs) {
3074                 if (adapter->dev_num_vfs)
3075                         be_vf_setup(adapter);
3076                 else
3077                         dev_warn(dev, "device doesn't support SRIOV\n");
3078         }
3079
3080         status = be_cmd_get_phy_info(adapter);
3081         if (!status && be_pause_supported(adapter))
3082                 adapter->phy.fc_autoneg = 1;
3083
3084         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3085         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3086         return 0;
3087 err:
3088         be_clear(adapter);
3089         return status;
3090 }
3091
3092 #ifdef CONFIG_NET_POLL_CONTROLLER
3093 static void be_netpoll(struct net_device *netdev)
3094 {
3095         struct be_adapter *adapter = netdev_priv(netdev);
3096         struct be_eq_obj *eqo;
3097         int i;
3098
3099         for_all_evt_queues(adapter, eqo, i) {
3100                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3101                 napi_schedule(&eqo->napi);
3102         }
3105 }
3106 #endif
3107
3108 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
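/* "*** SE FLASH DIRECTORY *** " cookie marking the start of the flash
 * section directory in a UFI image, stored as two 16-byte words
 */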
3109 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3110
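/* Decide whether the boot code (redboot) section needs flashing: compare the
 * CRC stored at the end of the image in the UFI with the CRC of what is
 * already in flash; reflash only if they differ.
 */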
3111 static bool be_flash_redboot(struct be_adapter *adapter,
3112                         const u8 *p, u32 img_start, int image_size,
3113                         int hdr_size)
3114 {
3115         u32 crc_offset;
3116         u8 flashed_crc[4];
3117         int status;
3118
3119         crc_offset = hdr_size + img_start + image_size - 4;
3120
3121         p += crc_offset;
3122
3123         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3124                         (image_size - 4));
3125         if (status) {
3126                 dev_err(&adapter->pdev->dev,
3127                         "could not get CRC from flash, not flashing redboot\n");
3128                 return false;
3129         }
3130
3131         /* update redboot only if the CRC does not match */
3132         return memcmp(flashed_crc, p, 4) != 0;
3136 }
3137
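/* External PHY FW is flashed only for the TN_8022 10GBase-T PHY */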
3138 static bool phy_flashing_required(struct be_adapter *adapter)
3139 {
3140         return (adapter->phy.phy_type == TN_8022 &&
3141                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3142 }
3143
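/* Check if a flash component of the given type is present in the UFI's
 * section directory (BE2 uses the gen2 directory layout)
 */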
3144 static bool is_comp_in_ufi(struct be_adapter *adapter,
3145                            struct flash_section_info *fsec, int type)
3146 {
3147         int i = 0, img_type = 0;
3148         struct flash_section_info_g2 *fsec_g2 = NULL;
3149
3150         if (BE2_chip(adapter))
3151                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3152
3153         for (i = 0; i < MAX_FLASH_COMP; i++) {
3154                 if (fsec_g2)
3155                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3156                 else
3157                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3158
3159                 if (img_type == type)
3160                         return true;
3161         }
3162         return false;
3164 }
3165
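/* Locate the flash section directory in the UFI image by scanning past the
 * file/image headers, 32 bytes at a time, for the flash cookie
 */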
3166 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3167                                                 int header_size,
3168                                                 const struct firmware *fw)
3169 {
3170         struct flash_section_info *fsec = NULL;
3171         const u8 *p = fw->data;
3172
3173         p += header_size;
3174         while (p < (fw->data + fw->size)) {
3175                 fsec = (struct flash_section_info *)p;
3176                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3177                         return fsec;
3178                 p += 32;
3179         }
3180         return NULL;
3181 }
3182
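/* Write an image to flash in 32KB chunks: intermediate chunks use a SAVE
 * operation, the final chunk uses a FLASH operation that commits the write.
 * PHY FW uses the PHY op variants; an ILLEGAL_IOCTL_REQ on PHY FW is treated
 * as "PHY flashing not supported" and skipped without error.
 */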
3183 static int be_flash(struct be_adapter *adapter, const u8 *img,
3184                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3185 {
3186         u32 total_bytes = 0, flash_op, num_bytes = 0;
3187         int status = 0;
3188         struct be_cmd_write_flashrom *req = flash_cmd->va;
3189
3190         total_bytes = img_size;
3191         while (total_bytes) {
3192                 num_bytes = min_t(u32, 32*1024, total_bytes);
3193
3194                 total_bytes -= num_bytes;
3195
3196                 if (!total_bytes) {
3197                         if (optype == OPTYPE_PHY_FW)
3198                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3199                         else
3200                                 flash_op = FLASHROM_OPER_FLASH;
3201                 } else {
3202                         if (optype == OPTYPE_PHY_FW)
3203                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3204                         else
3205                                 flash_op = FLASHROM_OPER_SAVE;
3206                 }
3207
3208                 memcpy(req->data_buf, img, num_bytes);
3209                 img += num_bytes;
3210                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3211                                                 flash_op, num_bytes);
3212                 if (status) {
3213                         if (status == ILLEGAL_IOCTL_REQ &&
3214                             optype == OPTYPE_PHY_FW)
3215                                 break;
3216                         dev_err(&adapter->pdev->dev,
3217                                 "cmd to write to flash rom failed.\n");
3218                         return status;
3219                 }
3220         }
3221         return 0;
3222 }
3223
3224 /* For BE2, BE3 and BE3-R */
3225 static int be_flash_BEx(struct be_adapter *adapter,
3226                          const struct firmware *fw,
3227                          struct be_dma_mem *flash_cmd,
3228                          int num_of_images)
3230 {
3231         int status = 0, i, filehdr_size = 0;
3232         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3233         const u8 *p = fw->data;
3234         const struct flash_comp *pflashcomp;
3235         int num_comp, redboot;
3236         struct flash_section_info *fsec = NULL;
3237
3238         struct flash_comp gen3_flash_types[] = {
3239                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3240                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3241                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3242                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3243                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3244                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3245                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3246                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3247                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3248                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3249                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3250                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3251                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3252                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3253                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3254                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3255                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3256                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3257                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3258                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3259         };
3260
3261         struct flash_comp gen2_flash_types[] = {
3262                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3263                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3264                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3265                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3266                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3267                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3268                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3269                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3270                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3271                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3272                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3273                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3274                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3275                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3276                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3277                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3278         };
3279
3280         if (BE3_chip(adapter)) {
3281                 pflashcomp = gen3_flash_types;
3282                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3283                 num_comp = ARRAY_SIZE(gen3_flash_types);
3284         } else {
3285                 pflashcomp = gen2_flash_types;
3286                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3287                 num_comp = ARRAY_SIZE(gen2_flash_types);
3288         }
3289
3290         /* Get flash section info */
3291         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3292         if (!fsec) {
3293                 dev_err(&adapter->pdev->dev,
3294                         "Invalid cookie. UFI corrupted?\n");
3295                 return -1;
3296         }
3297         for (i = 0; i < num_comp; i++) {
3298                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3299                         continue;
3300
3301                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3302                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3303                         continue;
3304
3305                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3306                     !phy_flashing_required(adapter))
3307                         continue;
3308
3309                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3310                         redboot = be_flash_redboot(adapter, fw->data,
3311                                 pflashcomp[i].offset, pflashcomp[i].size,
3312                                 filehdr_size + img_hdrs_size);
3313                         if (!redboot)
3314                                 continue;
3315                 }
3316
3317                 p = fw->data;
3318                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3319                 if (p + pflashcomp[i].size > fw->data + fw->size)
3320                         return -1;
3321
3322                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3323                                         pflashcomp[i].size);
3324                 if (status) {
3325                         dev_err(&adapter->pdev->dev,
3326                                 "Flashing section type %d failed.\n",
3327                                 pflashcomp[i].img_type);
3328                         return status;
3329                 }
3330         }
3331         return 0;
3332 }
3333
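/* Flash a Skyhawk (gen4) UFI: walk the flash section directory, map each
 * image type to its flash optype (skipping unknown types), and flash it
 */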
3334 static int be_flash_skyhawk(struct be_adapter *adapter,
3335                 const struct firmware *fw,
3336                 struct be_dma_mem *flash_cmd, int num_of_images)
3337 {
3338         int status = 0, i, filehdr_size = 0;
3339         int img_offset, img_size, img_optype, redboot;
3340         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3341         const u8 *p = fw->data;
3342         struct flash_section_info *fsec = NULL;
3343
3344         filehdr_size = sizeof(struct flash_file_hdr_g3);
3345         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3346         if (!fsec) {
3347                 dev_err(&adapter->pdev->dev,
3348                         "Invalid cookie. UFI corrupted?\n");
3349                 return -1;
3350         }
3351
3352         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3353                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3354                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3355
3356                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3357                 case IMAGE_FIRMWARE_iSCSI:
3358                         img_optype = OPTYPE_ISCSI_ACTIVE;
3359                         break;
3360                 case IMAGE_BOOT_CODE:
3361                         img_optype = OPTYPE_REDBOOT;
3362                         break;
3363                 case IMAGE_OPTION_ROM_ISCSI:
3364                         img_optype = OPTYPE_BIOS;
3365                         break;
3366                 case IMAGE_OPTION_ROM_PXE:
3367                         img_optype = OPTYPE_PXE_BIOS;
3368                         break;
3369                 case IMAGE_OPTION_ROM_FCoE:
3370                         img_optype = OPTYPE_FCOE_BIOS;
3371                         break;
3372                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3373                         img_optype = OPTYPE_ISCSI_BACKUP;
3374                         break;
3375                 case IMAGE_NCSI:
3376                         img_optype = OPTYPE_NCSI_FW;
3377                         break;
3378                 default:
3379                         continue;
3380                 }
3381
3382                 if (img_optype == OPTYPE_REDBOOT) {
3383                         redboot = be_flash_redboot(adapter, fw->data,
3384                                         img_offset, img_size,
3385                                         filehdr_size + img_hdrs_size);
3386                         if (!redboot)
3387                                 continue;
3388                 }
3389
3390                 p = fw->data;
3391                 p += filehdr_size + img_offset + img_hdrs_size;
3392                 if (p + img_size > fw->data + fw->size)
3393                         return -1;
3394
3395                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3396                 if (status) {
3397                         dev_err(&adapter->pdev->dev,
3398                                 "Flashing section type %d failed.\n",
3399                                 le32_to_cpu(fsec->fsec_entry[i].type));
3400                         return status;
3401                 }
3402         }
3403         return 0;
3404 }
3405
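/* Wait (up to 30s) for the Lancer SLI port to go idle, i.e. for the
 * in-progress bit in the PHYSDEV_CONTROL register to clear
 */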
3406 static int lancer_wait_idle(struct be_adapter *adapter)
3407 {
3408 #define SLIPORT_IDLE_TIMEOUT 30
3409         u32 reg_val;
3410         int status = 0, i;
3411
3412         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3413                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3414                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3415                         break;
3416
3417                 ssleep(1);
3418         }
3419
3420         if (i == SLIPORT_IDLE_TIMEOUT)
3421                 status = -1;
3422
3423         return status;
3424 }
3425
3426 static int lancer_fw_reset(struct be_adapter *adapter)
3427 {
3428         int status = 0;
3429
3430         status = lancer_wait_idle(adapter);
3431         if (status)
3432                 return status;
3433
3434         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3435                   PHYSDEV_CONTROL_OFFSET);
3436
3437         return status;
3438 }
3439
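/* Lancer FW download: write the image to the "/prg" object in 32KB chunks
 * via WRITE_OBJECT cmds, then commit it with a zero-length write; depending
 * on change_status, a FW reset or a system reboot may be needed afterwards
 */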
3440 static int lancer_fw_download(struct be_adapter *adapter,
3441                                 const struct firmware *fw)
3442 {
3443 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3444 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3445         struct be_dma_mem flash_cmd;
3446         const u8 *data_ptr = NULL;
3447         u8 *dest_image_ptr = NULL;
3448         size_t image_size = 0;
3449         u32 chunk_size = 0;
3450         u32 data_written = 0;
3451         u32 offset = 0;
3452         int status = 0;
3453         u8 add_status = 0;
3454         u8 change_status;
3455
3456         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3457                 dev_err(&adapter->pdev->dev,
3458                         "FW image not properly aligned: length must be 4-byte aligned\n");
3460                 status = -EINVAL;
3461                 goto lancer_fw_exit;
3462         }
3463
3464         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3465                                 + LANCER_FW_DOWNLOAD_CHUNK;
3466         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3467                                           &flash_cmd.dma, GFP_KERNEL);
3468         if (!flash_cmd.va) {
3469                 status = -ENOMEM;
3470                 goto lancer_fw_exit;
3471         }
3472
3473         dest_image_ptr = flash_cmd.va +
3474                                 sizeof(struct lancer_cmd_req_write_object);
3475         image_size = fw->size;
3476         data_ptr = fw->data;
3477
3478         while (image_size) {
3479                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3480
3481                 /* Copy the image chunk content. */
3482                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3483
3484                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3485                                                  chunk_size, offset,
3486                                                  LANCER_FW_DOWNLOAD_LOCATION,
3487                                                  &data_written, &change_status,
3488                                                  &add_status);
3489                 if (status)
3490                         break;
3491
3492                 offset += data_written;
3493                 data_ptr += data_written;
3494                 image_size -= data_written;
3495         }
3496
3497         if (!status) {
3498                 /* Commit the FW written */
3499                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3500                                                  0, offset,
3501                                                  LANCER_FW_DOWNLOAD_LOCATION,
3502                                                  &data_written, &change_status,
3503                                                  &add_status);
3504         }
3505
3506         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3507                                 flash_cmd.dma);
3508         if (status) {
3509                 dev_err(&adapter->pdev->dev,
3510                         "Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
3511                         status, add_status);
3513                 goto lancer_fw_exit;
3514         }
3515
3516         if (change_status == LANCER_FW_RESET_NEEDED) {
3517                 status = lancer_fw_reset(adapter);
3518                 if (status) {
3519                         dev_err(&adapter->pdev->dev,
3520                                 "Adapter busy for FW reset.\n"
3521                                 "New FW will not be active.\n");
3522                         goto lancer_fw_exit;
3523                 }
3524         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3525                 dev_err(&adapter->pdev->dev,
3526                         "System reboot required for new FW to be active\n");
3527         }
3529
3530         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3531 lancer_fw_exit:
3532         return status;
3533 }
3534
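/* Match the UFI image to the chip: the first char of the build string gives
 * the UFI generation ('2'=BE2, '3'=BE3, '4'=Skyhawk); asic_type_rev 0x10
 * distinguishes a BE3-R UFI from a plain BE3 one
 */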
3535 #define UFI_TYPE2               2
3536 #define UFI_TYPE3               3
3537 #define UFI_TYPE3R              10
3538 #define UFI_TYPE4               4
3539 static int be_get_ufi_type(struct be_adapter *adapter,
3540                            struct flash_file_hdr_g3 *fhdr)
3541 {
3542         if (fhdr == NULL)
3543                 goto be_get_ufi_exit;
3544
3545         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3546                 return UFI_TYPE4;
3547         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3548                 if (fhdr->asic_type_rev == 0x10)
3549                         return UFI_TYPE3R;
3550                 else
3551                         return UFI_TYPE3;
3552         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3553                 return UFI_TYPE2;
3554
3555 be_get_ufi_exit:
3556         dev_err(&adapter->pdev->dev,
3557                 "UFI and Interface are not compatible for flashing\n");
3558         return -1;
3559 }
3560
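/* Flash a BEx/Skyhawk UFI: validate UFI/chip compatibility, then flash the
 * images via the chip-specific routine (TYPE2 UFIs are flashed with no
 * image headers, i.e. num_imgs == 0)
 */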
3561 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3562 {
3563         struct flash_file_hdr_g3 *fhdr3;
3564         struct image_hdr *img_hdr_ptr = NULL;
3565         struct be_dma_mem flash_cmd;
3566         const u8 *p;
3567         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3568
3569         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3570         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3571                                           &flash_cmd.dma, GFP_KERNEL);
3572         if (!flash_cmd.va) {
3573                 status = -ENOMEM;
3574                 goto be_fw_exit;
3575         }
3576
3577         p = fw->data;
3578         fhdr3 = (struct flash_file_hdr_g3 *)p;
3579
3580         ufi_type = be_get_ufi_type(adapter, fhdr3);
3581
3582         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3583         for (i = 0; i < num_imgs; i++) {
3584                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3585                                 (sizeof(struct flash_file_hdr_g3) +
3586                                  i * sizeof(struct image_hdr)));
3587                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3588                         switch (ufi_type) {
3589                         case UFI_TYPE4:
3590                                 status = be_flash_skyhawk(adapter, fw,
3591                                                         &flash_cmd, num_imgs);
3592                                 break;
3593                         case UFI_TYPE3R:
3594                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3595                                                       num_imgs);
3596                                 break;
3597                         case UFI_TYPE3:
3598                                 /* Do not flash this UFI on BE3-R cards */
3599                                 if (adapter->asic_rev < 0x10) {
3600                                         status = be_flash_BEx(adapter, fw,
3601                                                               &flash_cmd,
3602                                                               num_imgs);
3603                                 } else {
3604                                         status = -1;
3605                                         dev_err(&adapter->pdev->dev,
3606                                                 "Can't load BE3 UFI on BE3-R\n");
3607                                 }
3608                         }
3609                 }
3610         }
3611
3612         if (ufi_type == UFI_TYPE2)
3613                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3614         else if (ufi_type == -1)
3615                 status = -1;
3616
3617         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3618                           flash_cmd.dma);
3619         if (status) {
3620                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3621                 goto be_fw_exit;
3622         }
3623
3624         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3625
3626 be_fw_exit:
3627         return status;
3628 }
3629
3630 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3631 {
3632         const struct firmware *fw;
3633         int status;
3634
3635         if (!netif_running(adapter->netdev)) {
3636                 dev_err(&adapter->pdev->dev,
3637                         "Firmware load not allowed (interface is down)\n");
3638                 return -1;
3639         }
3640
3641         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3642         if (status)
3643                 goto fw_exit;
3644
3645         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3646
3647         if (lancer_chip(adapter))
3648                 status = lancer_fw_download(adapter, fw);
3649         else
3650                 status = be_fw_download(adapter, fw);
3651
3652 fw_exit:
3653         release_firmware(fw);
3654         return status;
3655 }
3656
3657 static const struct net_device_ops be_netdev_ops = {
3658         .ndo_open               = be_open,
3659         .ndo_stop               = be_close,
3660         .ndo_start_xmit         = be_xmit,
3661         .ndo_set_rx_mode        = be_set_rx_mode,
3662         .ndo_set_mac_address    = be_mac_addr_set,
3663         .ndo_change_mtu         = be_change_mtu,
3664         .ndo_get_stats64        = be_get_stats64,
3665         .ndo_validate_addr      = eth_validate_addr,
3666         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3667         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3668         .ndo_set_vf_mac         = be_set_vf_mac,
3669         .ndo_set_vf_vlan        = be_set_vf_vlan,
3670         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3671         .ndo_get_vf_config      = be_get_vf_config,
3672 #ifdef CONFIG_NET_POLL_CONTROLLER
3673         .ndo_poll_controller    = be_netpoll,
3674 #endif
3675 };
3676
3677 static void be_netdev_init(struct net_device *netdev)
3678 {
3679         struct be_adapter *adapter = netdev_priv(netdev);
3680         struct be_eq_obj *eqo;
3681         int i;
3682
3683         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3684                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3685                 NETIF_F_HW_VLAN_CTAG_TX;
3686         if (be_multi_rxq(adapter))
3687                 netdev->hw_features |= NETIF_F_RXHASH;
3688
3689         netdev->features |= netdev->hw_features |
3690                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3691
3692         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3693                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3694
3695         netdev->priv_flags |= IFF_UNICAST_FLT;
3696
3697         netdev->flags |= IFF_MULTICAST;
3698
3699         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3700
3701         netdev->netdev_ops = &be_netdev_ops;
3702
3703         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3704
3705         for_all_evt_queues(adapter, eqo, i)
3706                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3707 }
3708
3709 static void be_unmap_pci_bars(struct be_adapter *adapter)
3710 {
3711         if (adapter->csr)
3712                 pci_iounmap(adapter->pdev, adapter->csr);
3713         if (adapter->db)
3714                 pci_iounmap(adapter->pdev, adapter->db);
3715 }
3716
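/* Doorbell BAR is BAR 0 on Lancer and for VFs, BAR 4 otherwise */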
3717 static int db_bar(struct be_adapter *adapter)
3718 {
3719         if (lancer_chip(adapter) || !be_physfn(adapter))
3720                 return 0;
3721         else
3722                 return 4;
3723 }
3724
3725 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3726 {
3727         if (skyhawk_chip(adapter)) {
3728                 adapter->roce_db.size = 4096;
3729                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3730                                                               db_bar(adapter));
3731                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3732                                                                db_bar(adapter));
3733         }
3734         return 0;
3735 }
3736
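/* ioremap the CSR BAR (BEx PFs only) and the doorbell BAR */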
3737 static int be_map_pci_bars(struct be_adapter *adapter)
3738 {
3739         u8 __iomem *addr;
3740         u32 sli_intf;
3741
3742         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3743         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3744                                 SLI_INTF_IF_TYPE_SHIFT;
3745
3746         if (BEx_chip(adapter) && be_physfn(adapter)) {
3747                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3748                 if (adapter->csr == NULL)
3749                         return -ENOMEM;
3750         }
3751
3752         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3753         if (addr == NULL)
3754                 goto pci_map_err;
3755         adapter->db = addr;
3756
3757         be_roce_map_pci_bars(adapter);
3758         return 0;
3759
3760 pci_map_err:
3761         be_unmap_pci_bars(adapter);
3762         return -ENOMEM;
3763 }
3764
3765 static void be_ctrl_cleanup(struct be_adapter *adapter)
3766 {
3767         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3768
3769         be_unmap_pci_bars(adapter);
3770
3771         if (mem->va)
3772                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3773                                   mem->dma);
3774
3775         mem = &adapter->rx_filter;
3776         if (mem->va)
3777                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3778                                   mem->dma);
3779 }
3780
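/* Map PCI BARs and allocate the DMA memory used for the FW mailbox and the
 * RX-filter cmd; the mailbox must be 16-byte aligned
 */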
3781 static int be_ctrl_init(struct be_adapter *adapter)
3782 {
3783         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3784         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3785         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3786         u32 sli_intf;
3787         int status;
3788
3789         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3790         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3791                                  SLI_INTF_FAMILY_SHIFT;
3792         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3793
3794         status = be_map_pci_bars(adapter);
3795         if (status)
3796                 goto done;
3797
3798         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3799         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3800                                                 mbox_mem_alloc->size,
3801                                                 &mbox_mem_alloc->dma,
3802                                                 GFP_KERNEL);
3803         if (!mbox_mem_alloc->va) {
3804                 status = -ENOMEM;
3805                 goto unmap_pci_bars;
3806         }
3807         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3808         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3809         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3810         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3811
3812         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3813         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3814                                            &rx_filter->dma,
3815                                            GFP_KERNEL | __GFP_ZERO);
3816         if (rx_filter->va == NULL) {
3817                 status = -ENOMEM;
3818                 goto free_mbox;
3819         }
3820
3821         mutex_init(&adapter->mbox_lock);
3822         spin_lock_init(&adapter->mcc_lock);
3823         spin_lock_init(&adapter->mcc_cq_lock);
3824
3825         init_completion(&adapter->flash_compl);
3826         pci_save_state(adapter->pdev);
3827         return 0;
3828
3829 free_mbox:
3830         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3831                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3832
3833 unmap_pci_bars:
3834         be_unmap_pci_bars(adapter);
3835
3836 done:
3837         return status;
3838 }
3839
3840 static void be_stats_cleanup(struct be_adapter *adapter)
3841 {
3842         struct be_dma_mem *cmd = &adapter->stats_cmd;
3843
3844         if (cmd->va)
3845                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3846                                   cmd->va, cmd->dma);
3847 }
3848
3849 static int be_stats_init(struct be_adapter *adapter)
3850 {
3851         struct be_dma_mem *cmd = &adapter->stats_cmd;
3852
3853         if (lancer_chip(adapter))
3854                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3855         else if (BE2_chip(adapter))
3856                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3857         else
3858                 /* BE3 and Skyhawk */
3859                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3860
3861         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3862                                      GFP_KERNEL | __GFP_ZERO);
3863         if (cmd->va == NULL)
3864                 return -ENOMEM;
3865         return 0;
3866 }
3867
3868 static void be_remove(struct pci_dev *pdev)
3869 {
3870         struct be_adapter *adapter = pci_get_drvdata(pdev);
3871
3872         if (!adapter)
3873                 return;
3874
3875         be_roce_dev_remove(adapter);
3876         be_intr_set(adapter, false);
3877
3878         cancel_delayed_work_sync(&adapter->func_recovery_work);
3879
3880         unregister_netdev(adapter->netdev);
3881
3882         be_clear(adapter);
3883
3884         /* tell fw we're done with firing cmds */
3885         be_cmd_fw_clean(adapter);
3886
3887         be_stats_cleanup(adapter);
3888
3889         be_ctrl_cleanup(adapter);
3890
3891         pci_disable_pcie_error_reporting(pdev);
3892
3893         pci_set_drvdata(pdev, NULL);
3894         pci_release_regions(pdev);
3895         pci_disable_device(pdev);
3896
3897         free_netdev(adapter->netdev);
3898 }
3899
3900 bool be_is_wol_supported(struct be_adapter *adapter)
3901 {
3902         return (adapter->wol_cap & BE_WOL_CAP) &&
3903                !be_is_wol_excluded(adapter);
3904 }
3905
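/* Read the FW's UART trace level from its extended FAT capabilities;
 * Lancer is skipped and reports level 0
 */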
3906 u32 be_get_fw_log_level(struct be_adapter *adapter)
3907 {
3908         struct be_dma_mem extfat_cmd;
3909         struct be_fat_conf_params *cfgs;
3910         int status;
3911         u32 level = 0;
3912         int j;
3913
3914         if (lancer_chip(adapter))
3915                 return 0;
3916
3917         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3918         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3919         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3920                                              &extfat_cmd.dma);
3921
3922         if (!extfat_cmd.va) {
3923                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3924                         __func__);
3925                 goto err;
3926         }
3927
3928         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3929         if (!status) {
3930                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3931                                                 sizeof(struct be_cmd_resp_hdr));
3932                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3933                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3934                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3935                 }
3936         }
3937         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3938                             extfat_cmd.dma);
3939 err:
3940         return level;
3941 }
3942
3943 static int be_get_initial_config(struct be_adapter *adapter)
3944 {
3945         int status;
3946         u32 level;
3947
3948         status = be_cmd_get_cntl_attributes(adapter);
3949         if (status)
3950                 return status;
3951
3952         status = be_cmd_get_acpi_wol_cap(adapter);
3953         if (status) {
3954                 /* in case of a failure to get WOL capabilities
3955                  * check the exclusion list to determine WOL capability */
3956                 if (!be_is_wol_excluded(adapter))
3957                         adapter->wol_cap |= BE_WOL_CAP;
3958         }
3959
3960         if (be_is_wol_supported(adapter))
3961                 adapter->wol = true;
3962
3963         /* Must be a power of 2 or else MODULO will BUG_ON */
3964         adapter->be_get_temp_freq = 64;
3965
3966         level = be_get_fw_log_level(adapter);
3967         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3968
3969         return 0;
3970 }
3971
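/* Lancer SLIPORT error recovery: wait for FW to become ready again, then
 * tear down and re-create the whole HW config and reopen the interface
 */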
3972 static int lancer_recover_func(struct be_adapter *adapter)
3973 {
3974         int status;
3975
3976         status = lancer_test_and_set_rdy_state(adapter);
3977         if (status)
3978                 goto err;
3979
3980         if (netif_running(adapter->netdev))
3981                 be_close(adapter->netdev);
3982
3983         be_clear(adapter);
3984
3985         adapter->hw_error = false;
3986         adapter->fw_timeout = false;
3987
3988         status = be_setup(adapter);
3989         if (status)
3990                 goto err;
3991
3992         if (netif_running(adapter->netdev)) {
3993                 status = be_open(adapter->netdev);
3994                 if (status)
3995                         goto err;
3996         }
3997
3998         dev_info(&adapter->pdev->dev,
3999                  "Adapter SLIPORT recovery succeeded\n");
4000         return 0;
4001 err:
4002         if (adapter->eeh_error)
4003                 dev_err(&adapter->pdev->dev,
4004                         "Adapter SLIPORT recovery failed\n");
4005
4006         return status;
4007 }
4008
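/* Runs every second; on a Lancer HW error (other than an EEH error) it
 * detaches the netdev and attempts SLIPORT recovery
 */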
4009 static void be_func_recovery_task(struct work_struct *work)
4010 {
4011         struct be_adapter *adapter =
4012                 container_of(work, struct be_adapter,  func_recovery_work.work);
4013         int status;
4014
4015         be_detect_error(adapter);
4016
4017         if (adapter->hw_error && lancer_chip(adapter)) {
4019                 if (adapter->eeh_error)
4020                         goto out;
4021
4022                 rtnl_lock();
4023                 netif_device_detach(adapter->netdev);
4024                 rtnl_unlock();
4025
4026                 status = lancer_recover_func(adapter);
4027
4028                 if (!status)
4029                         netif_device_attach(adapter->netdev);
4030         }
4031
4032 out:
4033         schedule_delayed_work(&adapter->func_recovery_work,
4034                               msecs_to_jiffies(1000));
4035 }
4036
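/* Periodic (1s) housekeeping: with the interface down only MCC completions
 * are reaped; otherwise fire stats and die-temperature cmds, replenish
 * RX queues that ran out of buffers, and update adaptive EQ delays
 */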
4037 static void be_worker(struct work_struct *work)
4038 {
4039         struct be_adapter *adapter =
4040                 container_of(work, struct be_adapter, work.work);
4041         struct be_rx_obj *rxo;
4042         struct be_eq_obj *eqo;
4043         int i;
4044
4045         /* when interrupts are not yet enabled, just reap any pending
4046          * MCC completions */
4047         if (!netif_running(adapter->netdev)) {
4048                 local_bh_disable();
4049                 be_process_mcc(adapter);
4050                 local_bh_enable();
4051                 goto reschedule;
4052         }
4053
4054         if (!adapter->stats_cmd_sent) {
4055                 if (lancer_chip(adapter))
4056                         lancer_cmd_get_pport_stats(adapter,
4057                                                 &adapter->stats_cmd);
4058                 else
4059                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4060         }
4061
4062         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4063                 be_cmd_get_die_temperature(adapter);
4064
4065         for_all_rx_queues(adapter, rxo, i) {
4066                 if (rxo->rx_post_starved) {
4067                         rxo->rx_post_starved = false;
4068                         be_post_rx_frags(rxo, GFP_KERNEL);
4069                 }
4070         }
4071
4072         for_all_evt_queues(adapter, eqo, i)
4073                 be_eqd_update(adapter, eqo);
4074
4075 reschedule:
4076         adapter->work_counter++;
4077         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4078 }
4079
4080 static bool be_reset_required(struct be_adapter *adapter)
4081 {
4082         return be_find_vfs(adapter, ENABLED) <= 0;
4083 }
4084
4085 static char *mc_name(struct be_adapter *adapter)
4086 {
4087         if (adapter->function_mode & FLEX10_MODE)
4088                 return "FLEX10";
4089         else if (adapter->function_mode & VNIC_MODE)
4090                 return "vNIC";
4091         else if (adapter->function_mode & UMC_ENABLED)
4092                 return "UMC";
4093         else
4094                 return "";
4095 }
4096
4097 static inline char *func_name(struct be_adapter *adapter)
4098 {
4099         return be_physfn(adapter) ? "PF" : "VF";
4100 }
4101
4102 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4103 {
4104         int status = 0;
4105         struct be_adapter *adapter;
4106         struct net_device *netdev;
4107         char port_name;
4108
4109         status = pci_enable_device(pdev);
4110         if (status)
4111                 goto do_none;
4112
4113         status = pci_request_regions(pdev, DRV_NAME);
4114         if (status)
4115                 goto disable_dev;
4116         pci_set_master(pdev);
4117
4118         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4119         if (netdev == NULL) {
4120                 status = -ENOMEM;
4121                 goto rel_reg;
4122         }
4123         adapter = netdev_priv(netdev);
4124         adapter->pdev = pdev;
4125         pci_set_drvdata(pdev, adapter);
4126         adapter->netdev = netdev;
4127         SET_NETDEV_DEV(netdev, &pdev->dev);
4128
4129         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4130         if (!status) {
4131                 netdev->features |= NETIF_F_HIGHDMA;
4132         } else {
4133                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4134                 if (status) {
4135                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4136                         goto free_netdev;
4137                 }
4138         }
4139
4140         status = pci_enable_pcie_error_reporting(pdev);
4141         if (status)
4142                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4143
4144         status = be_ctrl_init(adapter);
4145         if (status)
4146                 goto free_netdev;
4147
4148         /* sync up with fw's ready state */
4149         if (be_physfn(adapter)) {
4150                 status = be_fw_wait_ready(adapter);
4151                 if (status)
4152                         goto ctrl_clean;
4153         }
4154
4155         /* tell fw we're ready to fire cmds */
4156         status = be_cmd_fw_init(adapter);
4157         if (status)
4158                 goto ctrl_clean;
4159
4160         if (be_reset_required(adapter)) {
4161                 status = be_cmd_reset_function(adapter);
4162                 if (status)
4163                         goto ctrl_clean;
4164         }
4165
4166         /* Wait for interrupts to quiesce after an FLR */
4167         msleep(100);
4168
4169         /* Allow interrupts for other ULPs running on NIC function */
4170         be_intr_set(adapter, true);
4171
4172         status = be_stats_init(adapter);
4173         if (status)
4174                 goto ctrl_clean;
4175
4176         status = be_get_initial_config(adapter);
4177         if (status)
4178                 goto stats_clean;
4179
4180         INIT_DELAYED_WORK(&adapter->work, be_worker);
4181         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4182         adapter->rx_fc = adapter->tx_fc = true;
4183
4184         status = be_setup(adapter);
4185         if (status)
4186                 goto stats_clean;
4187
4188         be_netdev_init(netdev);
4189         status = register_netdev(netdev);
4190         if (status != 0)
4191                 goto unsetup;
4192
4193         be_roce_dev_add(adapter);
4194
4195         schedule_delayed_work(&adapter->func_recovery_work,
4196                               msecs_to_jiffies(1000));
4197
4198         be_cmd_query_port_name(adapter, &port_name);
4199
4200         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4201                  func_name(adapter), mc_name(adapter), port_name);
4202
4203         return 0;
4204
4205 unsetup:
4206         be_clear(adapter);
4207 stats_clean:
4208         be_stats_cleanup(adapter);
4209 ctrl_clean:
4210         be_ctrl_cleanup(adapter);
4211 free_netdev:
4212         free_netdev(netdev);
4213         pci_set_drvdata(pdev, NULL);
4214 rel_reg:
4215         pci_release_regions(pdev);
4216 disable_dev:
4217         pci_disable_device(pdev);
4218 do_none:
4219         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4220         return status;
4221 }
4222
4223 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4224 {
4225         struct be_adapter *adapter = pci_get_drvdata(pdev);
4226         struct net_device *netdev =  adapter->netdev;
4227
4228         if (adapter->wol)
4229                 be_setup_wol(adapter, true);
4230
4231         cancel_delayed_work_sync(&adapter->func_recovery_work);
4232
4233         netif_device_detach(netdev);
4234         if (netif_running(netdev)) {
4235                 rtnl_lock();
4236                 be_close(netdev);
4237                 rtnl_unlock();
4238         }
4239         be_clear(adapter);
4240
4241         pci_save_state(pdev);
4242         pci_disable_device(pdev);
4243         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4244         return 0;
4245 }
4246
4247 static int be_resume(struct pci_dev *pdev)
4248 {
4249         int status = 0;
4250         struct be_adapter *adapter = pci_get_drvdata(pdev);
4251         struct net_device *netdev =  adapter->netdev;
4252
4253         netif_device_detach(netdev);
4254
4255         status = pci_enable_device(pdev);
4256         if (status)
4257                 return status;
4258
4259         pci_set_power_state(pdev, 0);
4260         pci_restore_state(pdev);
4261
4262         /* tell fw we're ready to fire cmds */
4263         status = be_cmd_fw_init(adapter);
4264         if (status)
4265                 return status;
4266
4267         be_setup(adapter);
4268         if (netif_running(netdev)) {
4269                 rtnl_lock();
4270                 be_open(netdev);
4271                 rtnl_unlock();
4272         }
4273
4274         schedule_delayed_work(&adapter->func_recovery_work,
4275                               msecs_to_jiffies(1000));
4276         netif_device_attach(netdev);
4277
4278         if (adapter->wol)
4279                 be_setup_wol(adapter, false);
4280
4281         return 0;
4282 }
4283
4284 /*
4285  * An FLR will stop BE from DMAing any data.
4286  */
4287 static void be_shutdown(struct pci_dev *pdev)
4288 {
4289         struct be_adapter *adapter = pci_get_drvdata(pdev);
4290
4291         if (!adapter)
4292                 return;
4293
4294         cancel_delayed_work_sync(&adapter->work);
4295         cancel_delayed_work_sync(&adapter->func_recovery_work);
4296
4297         netif_device_detach(adapter->netdev);
4298
4299         be_cmd_reset_function(adapter);
4300
4301         pci_disable_device(pdev);
4302 }
4303
4304 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4305                                 pci_channel_state_t state)
4306 {
4307         struct be_adapter *adapter = pci_get_drvdata(pdev);
4308         struct net_device *netdev =  adapter->netdev;
4309
4310         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4311
4312         adapter->eeh_error = true;
4313
4314         cancel_delayed_work_sync(&adapter->func_recovery_work);
4315
4316         rtnl_lock();
4317         netif_device_detach(netdev);
4318         rtnl_unlock();
4319
4320         if (netif_running(netdev)) {
4321                 rtnl_lock();
4322                 be_close(netdev);
4323                 rtnl_unlock();
4324         }
4325         be_clear(adapter);
4326
4327         if (state == pci_channel_io_perm_failure)
4328                 return PCI_ERS_RESULT_DISCONNECT;
4329
4330         pci_disable_device(pdev);
4331
4332         /* The error could cause the FW to trigger a flash debug dump.
4333          * Resetting the card while a flash dump is in progress
4334          * can prevent it from recovering; wait for the dump to finish.
4335          * Only the first function needs to wait, as the dump happens
4336          * just once per adapter.
4337          */
4338         if (pdev->devfn == 0)
4339                 ssleep(30);
4340
4341         return PCI_ERS_RESULT_NEED_RESET;
4342 }
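/* The value returned from error_detected() steers EEH recovery (see
 * Documentation/PCI/pci-error-recovery.txt). A sketch of the decision;
 * the quiesce step stands in for the be_close()/be_clear() work above.
 */
#if 0
static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state)
{
        /* quiesce first: no MMIO or DMA while the channel is frozen */

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;       /* device is dead */

        /* PCI_ERS_RESULT_CAN_RECOVER would skip the slot reset; NIC
         * drivers such as this one typically want the full reset. */
        return PCI_ERS_RESULT_NEED_RESET;               /* slot_reset() runs next */
}
#endif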
4343
4344 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4345 {
4346         struct be_adapter *adapter = pci_get_drvdata(pdev);
4347         int status;
4348
4349         dev_info(&adapter->pdev->dev, "EEH reset\n");
4350         be_clear_all_error(adapter);
4351
4352         status = pci_enable_device(pdev);
4353         if (status)
4354                 return PCI_ERS_RESULT_DISCONNECT;
4355
4356         pci_set_master(pdev);
4357         pci_set_power_state(pdev, PCI_D0);
4358         pci_restore_state(pdev);
4359
4360         /* Check if card is ok and fw is ready */
4361         dev_info(&adapter->pdev->dev,
4362                  "Waiting for FW to be ready after EEH reset\n");
4363         status = be_fw_wait_ready(adapter);
4364         if (status)
4365                 return PCI_ERS_RESULT_DISCONNECT;
4366
4367         pci_cleanup_aer_uncorrect_error_status(pdev);
4368         return PCI_ERS_RESULT_RECOVERED;
4369 }
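/* slot_reset() is entered with the device freshly reset: the driver must
 * re-enable it, restore bus mastering and config space, and prove the
 * hardware is alive (the FW-ready poll above) before reporting
 * PCI_ERS_RESULT_RECOVERED. pci_cleanup_aer_uncorrect_error_status()
 * clears the logged AER status so a stale error cannot re-trigger EEH.
 */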
4370
4371 static void be_eeh_resume(struct pci_dev *pdev)
4372 {
4373         int status = 0;
4374         struct be_adapter *adapter = pci_get_drvdata(pdev);
4375         struct net_device *netdev = adapter->netdev;
4376
4377         dev_info(&adapter->pdev->dev, "EEH resume\n");
4378
4379         pci_save_state(pdev);
4380
4381         /* tell fw we're ready to fire cmds */
4382         status = be_cmd_fw_init(adapter);
4383         if (status)
4384                 goto err;
4385
4386         status = be_cmd_reset_function(adapter);
4387         if (status)
4388                 goto err;
4389
4390         status = be_setup(adapter);
4391         if (status)
4392                 goto err;
4393
4394         if (netif_running(netdev)) {
4395                 status = be_open(netdev);
4396                 if (status)
4397                         goto err;
4398         }
4399
4400         schedule_delayed_work(&adapter->func_recovery_work,
4401                               msecs_to_jiffies(1000));
4402         netif_device_attach(netdev);
4403         return;
4404 err:
4405         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4406 }
4407
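/* EEH recovery invokes these callbacks in order: error_detected() quiesces
 * the driver and picks a strategy, slot_reset() re-initializes the PCI
 * layer on the freshly reset device, and resume() rebuilds rings and
 * restarts traffic.
 */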
4408 static const struct pci_error_handlers be_eeh_handlers = {
4409         .error_detected = be_eeh_err_detected,
4410         .slot_reset = be_eeh_reset,
4411         .resume = be_eeh_resume,
4412 };
4413
4414 static struct pci_driver be_driver = {
4415         .name = DRV_NAME,
4416         .id_table = be_dev_ids,
4417         .probe = be_probe,
4418         .remove = be_remove,
4419         .suspend = be_suspend,
4420         .resume = be_resume,
4421         .shutdown = be_shutdown,
4422         .err_handler = &be_eeh_handlers
4423 };
4424
4425 static int __init be_init_module(void)
4426 {
4427         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4428             rx_frag_size != 2048) {
4429                 pr_warn(DRV_NAME
4430                         ": Module param rx_frag_size must be 2048/4096/8192."
4431                         " Using 2048\n");
4432                 rx_frag_size = 2048;
4433         }
4434
4435         return pci_register_driver(&be_driver);
4436 }
4437 module_init(be_init_module);
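/* Example usage (hypothetical value): load the module with a non-default
 * fragment size, e.g.
 *
 *      modprobe be2net rx_frag_size=4096
 *
 * Any other value is rejected by the check above and falls back to 2048
 * after the warning is printed.
 */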
4438
4439 static void __exit be_exit_module(void)
4440 {
4441         pci_unregister_driver(&be_driver);
4442 }
4443 module_exit(be_exit_module);