/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

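/* Allocate a zeroed, DMA-coherent ring of 'len' entries of 'entry_size'
 * bytes each for the given queue. Returns -ENOMEM on allocation failure.
 */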
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

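/* Enable/disable adapter interrupts. The FW command is tried first; if it
 * fails, fall back to toggling the HOSTINTR bit in PCI config space via
 * be_reg_intr_set().
 */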
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

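/* Doorbell helpers: the wmb() ensures that descriptor updates in memory are
 * visible to the device before the doorbell write that hands the new
 * entries over to HW.
 */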
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

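/* ndo_set_mac_address handler. The new pmac is added before the currently
 * active one is deleted, so the interface never loses its unicast filter
 * while the address changes.
 */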
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For BE VF, MAC address is already activated by PF.
         * Hence only operation left is updating netdev->dev_addr.
         * Update it if user is passing the same MAC which was used
         * during configuring VF MAC from PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

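/* Accumulate a 16-bit HW counter into a 32-bit SW counter: the low 16 bits
 * of *acc mirror the HW value, and every wrap of the HW counter adds 65536
 * to the accumulated value.
 */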
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

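/* ndo_get_stats64 handler: sums the per-queue SW counters (using the
 * u64_stats retry loop for consistent reads on 32-bit hosts) and folds in
 * the HW error counters collected in adapter->drv_stats.
 */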
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

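/* Map the skb (head via dma_map_single(), frags via skb_frag_dma_map())
 * and post one WRB per buffer, preceded by the header WRB reserved at the
 * queue head. On a mapping error, unmap all WRBs posted so far and roll
 * back the queue head. Returns the number of bytes posted, or 0 on failure.
 */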
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

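/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet itself instead of using HW VLAN offload; used by the TX
 * workarounds below when HW tagging must be skipped.
 */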
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
        else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
                vlan_tag = adapter->pvid;

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

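/* Detect IPv6 packets carrying an extension header that can trigger the
 * TX stall issue worked around in be_xmit_workarounds().
 */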
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
         * pad such packets to a 36-byte length.
         */
        if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

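/* Main transmit entry point: apply the HW workarounds, build the WRBs and
 * ring the TX doorbell. The queue is stopped *before* the doorbell ring
 * when it may not fit another max-sized skb, so that the TX completions of
 * this submission can safely wake it up again.
 */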
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

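/* ndo_set_rx_mode handler: programs promiscuous/multicast filtering and
 * rebuilds the unicast MAC list. When the number of addresses exceeds the
 * available HW filters, falls back to promiscuous (UC) or all-multicast
 * (MC) mode.
 */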
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

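/* Adaptive interrupt coalescing: once a second, derive a new EQ delay from
 * the measured RX packets-per-second rate, clamp it to the
 * [min_eqd, max_eqd] range and program it via be_cmd_modify_eqd() if it
 * changed.
 */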
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

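/* Return the page_info for the RX frag at frag_idx; unmap the backing page
 * from DMA when its last frag is consumed and decrement the count of
 * posted RX descriptors.
 */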
1338 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1339                                                 u16 frag_idx)
1340 {
1341         struct be_adapter *adapter = rxo->adapter;
1342         struct be_rx_page_info *rx_page_info;
1343         struct be_queue_info *rxq = &rxo->q;
1344
1345         rx_page_info = &rxo->page_info_tbl[frag_idx];
1346         BUG_ON(!rx_page_info->page);
1347
1348         if (rx_page_info->last_page_user) {
1349                 dma_unmap_page(&adapter->pdev->dev,
1350                                dma_unmap_addr(rx_page_info, bus),
1351                                adapter->big_page_size, DMA_FROM_DEVICE);
1352                 rx_page_info->last_page_user = false;
1353         }
1354
1355         atomic_dec(&rxq->used);
1356         return rx_page_info;
1357 }
1358
1359 /* Throwaway the data in the Rx completion */
1360 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1361                                 struct be_rx_compl_info *rxcp)
1362 {
1363         struct be_queue_info *rxq = &rxo->q;
1364         struct be_rx_page_info *page_info;
1365         u16 i, num_rcvd = rxcp->num_rcvd;
1366
1367         for (i = 0; i < num_rcvd; i++) {
1368                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1369                 put_page(page_info->page);
1370                 memset(page_info, 0, sizeof(*page_info));
1371                 index_inc(&rxcp->rxq_idx, rxq->len);
1372         }
1373 }
1374
1375 /*
1376  * skb_fill_rx_data forms a complete skb for an ether frame
1377  * indicated by rxcp.
1378  */
1379 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1380                              struct be_rx_compl_info *rxcp)
1381 {
1382         struct be_queue_info *rxq = &rxo->q;
1383         struct be_rx_page_info *page_info;
1384         u16 i, j;
1385         u16 hdr_len, curr_frag_len, remaining;
1386         u8 *start;
1387
1388         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1389         start = page_address(page_info->page) + page_info->page_offset;
1390         prefetch(start);
1391
1392         /* Copy data in the first descriptor of this completion */
1393         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1394
1395         skb->len = curr_frag_len;
1396         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1397                 memcpy(skb->data, start, curr_frag_len);
1398                 /* Complete packet has now been moved to data */
1399                 put_page(page_info->page);
1400                 skb->data_len = 0;
1401                 skb->tail += curr_frag_len;
1402         } else {
1403                 hdr_len = ETH_HLEN;
1404                 memcpy(skb->data, start, hdr_len);
1405                 skb_shinfo(skb)->nr_frags = 1;
1406                 skb_frag_set_page(skb, 0, page_info->page);
1407                 skb_shinfo(skb)->frags[0].page_offset =
1408                                         page_info->page_offset + hdr_len;
1409                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1410                 skb->data_len = curr_frag_len - hdr_len;
1411                 skb->truesize += rx_frag_size;
1412                 skb->tail += hdr_len;
1413         }
1414         page_info->page = NULL;
1415
1416         if (rxcp->pkt_size <= rx_frag_size) {
1417                 BUG_ON(rxcp->num_rcvd != 1);
1418                 return;
1419         }
1420
1421         /* More frags present for this completion */
1422         index_inc(&rxcp->rxq_idx, rxq->len);
1423         remaining = rxcp->pkt_size - curr_frag_len;
1424         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1425                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1426                 curr_frag_len = min(remaining, rx_frag_size);
1427
1428                 /* Coalesce all frags from the same physical page in one slot */
1429                 if (page_info->page_offset == 0) {
1430                         /* Fresh page */
1431                         j++;
1432                         skb_frag_set_page(skb, j, page_info->page);
1433                         skb_shinfo(skb)->frags[j].page_offset =
1434                                                         page_info->page_offset;
1435                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1436                         skb_shinfo(skb)->nr_frags++;
1437                 } else {
1438                         put_page(page_info->page);
1439                 }
1440
1441                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1442                 skb->len += curr_frag_len;
1443                 skb->data_len += curr_frag_len;
1444                 skb->truesize += rx_frag_size;
1445                 remaining -= curr_frag_len;
1446                 index_inc(&rxcp->rxq_idx, rxq->len);
1447                 page_info->page = NULL;
1448         }
1449         BUG_ON(j > MAX_SKB_FRAGS);
1450 }
1451
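/* A worked example (illustrative sizes only) for skb_fill_rx_data():
 * pkt_size = 5000 with rx_frag_size = 2048 gives num_rcvd = 3.  The
 * first 2048-byte chunk lands in descriptor 0: ETH_HLEN bytes are
 * copied into the linear area and the remaining 2034 become page
 * frag 0.  The loop above then appends the 2048- and 904-byte chunks;
 * a chunk starting at page_offset 0 opens a new skb frag, otherwise
 * it is coalesced into the current frag via skb_frag_size_add().
 */
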
1452 /* Process the RX completion indicated by rxcp when GRO is disabled */
1453 static void be_rx_compl_process(struct be_rx_obj *rxo,
1454                                 struct be_rx_compl_info *rxcp)
1455 {
1456         struct be_adapter *adapter = rxo->adapter;
1457         struct net_device *netdev = adapter->netdev;
1458         struct sk_buff *skb;
1459
1460         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1461         if (unlikely(!skb)) {
1462                 rx_stats(rxo)->rx_drops_no_skbs++;
1463                 be_rx_compl_discard(rxo, rxcp);
1464                 return;
1465         }
1466
1467         skb_fill_rx_data(rxo, skb, rxcp);
1468
1469         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1470                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471         else
1472                 skb_checksum_none_assert(skb);
1473
1474         skb->protocol = eth_type_trans(skb, netdev);
1475         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1476         if (netdev->features & NETIF_F_RXHASH)
1477                 skb->rxhash = rxcp->rss_hash;
1478
1479
1480         if (rxcp->vlanf)
1481                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1482
1483         netif_receive_skb(skb);
1484 }
1485
1486 /* Process the RX completion indicated by rxcp when GRO is enabled */
1487 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1488                              struct be_rx_compl_info *rxcp)
1489 {
1490         struct be_adapter *adapter = rxo->adapter;
1491         struct be_rx_page_info *page_info;
1492         struct sk_buff *skb = NULL;
1493         struct be_queue_info *rxq = &rxo->q;
1494         u16 remaining, curr_frag_len;
1495         u16 i, j;
1496
1497         skb = napi_get_frags(napi);
1498         if (!skb) {
1499                 be_rx_compl_discard(rxo, rxcp);
1500                 return;
1501         }
1502
1503         remaining = rxcp->pkt_size;
1504         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1505                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1506
1507                 curr_frag_len = min(remaining, rx_frag_size);
1508
1509                 /* Coalesce all frags from the same physical page in one slot */
1510                 if (i == 0 || page_info->page_offset == 0) {
1511                         /* First frag or Fresh page */
1512                         j++;
1513                         skb_frag_set_page(skb, j, page_info->page);
1514                         skb_shinfo(skb)->frags[j].page_offset =
1515                                                         page_info->page_offset;
1516                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1517                 } else {
1518                         put_page(page_info->page);
1519                 }
1520                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1521                 skb->truesize += rx_frag_size;
1522                 remaining -= curr_frag_len;
1523                 index_inc(&rxcp->rxq_idx, rxq->len);
1524                 memset(page_info, 0, sizeof(*page_info));
1525         }
1526         BUG_ON(j > MAX_SKB_FRAGS);
1527
1528         skb_shinfo(skb)->nr_frags = j + 1;
1529         skb->len = rxcp->pkt_size;
1530         skb->data_len = rxcp->pkt_size;
1531         skb->ip_summed = CHECKSUM_UNNECESSARY;
1532         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1533         if (adapter->netdev->features & NETIF_F_RXHASH)
1534                 skb->rxhash = rxcp->rss_hash;
1535
1536         if (rxcp->vlanf)
1537                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1538
1539         napi_gro_frags(napi);
1540 }
1541
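/* In the GRO path above, napi_get_frags() hands back a reusable skb
 * owned by NAPI; the driver only fills skb_shinfo(skb)->frags[] and
 * the length fields, and napi_gro_frags() then feeds the skb to the
 * GRO engine.  Note that j is a u16 initialized to -1 (i.e. 0xffff):
 * the first iteration always takes the "fresh page" branch (i == 0),
 * so j wraps to 0 before frags[j] is ever touched.
 */
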
1542 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1543                                  struct be_rx_compl_info *rxcp)
1544 {
1545         rxcp->pkt_size =
1546                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1547         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1548         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1549         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1550         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1551         rxcp->ip_csum =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1553         rxcp->l4_csum =
1554                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1555         rxcp->ipv6 =
1556                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1557         rxcp->rxq_idx =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1559         rxcp->num_rcvd =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1561         rxcp->pkt_type =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1563         rxcp->rss_hash =
1564                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1565         if (rxcp->vlanf) {
1566                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1567                                           compl);
1568                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1569                                                compl);
1570         }
1571         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1572 }
1573
1574 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1575                                  struct be_rx_compl_info *rxcp)
1576 {
1577         rxcp->pkt_size =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1579         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1580         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1581         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1582         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1583         rxcp->ip_csum =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1585         rxcp->l4_csum =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1587         rxcp->ipv6 =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1589         rxcp->rxq_idx =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1591         rxcp->num_rcvd =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1593         rxcp->pkt_type =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1595         rxcp->rss_hash =
1596                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1597         if (rxcp->vlanf) {
1598                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1599                                           compl);
1600                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1601                                                compl);
1602         }
1603         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1604         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1605                                       ip_frag, compl);
1606 }
1607
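/* The two parsers above differ only in the completion layout they
 * decode: AMAP_GET_BITS() extracts a named bit-field from the
 * little-endian descriptor using the offsets recorded in the
 * amap_eth_rx_compl_v0/v1 template structs.  be_rx_compl_get() below
 * picks v1 when the adapter runs in BE3-native mode and v0 otherwise.
 */
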
1608 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1609 {
1610         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1611         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1612         struct be_adapter *adapter = rxo->adapter;
1613
1614         /* For checking the valid bit it is OK to use either definition, as the
1615          * valid bit is at the same position in both v0 and v1 Rx compl */
1616         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1617                 return NULL;
1618
1619         rmb();
1620         be_dws_le_to_cpu(compl, sizeof(*compl));
1621
1622         if (adapter->be3_native)
1623                 be_parse_rx_compl_v1(compl, rxcp);
1624         else
1625                 be_parse_rx_compl_v0(compl, rxcp);
1626
1627         if (rxcp->ip_frag)
1628                 rxcp->l4_csum = 0;
1629
1630         if (rxcp->vlanf) {
1631                 /* vlanf could be wrongly set in some cards.
1632          * Ignore it if vtm is not set. */
1633                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1634                         rxcp->vlanf = 0;
1635
1636                 if (!lancer_chip(adapter))
1637                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1638
1639                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1640                     !adapter->vlan_tag[rxcp->vlan_tag])
1641                         rxcp->vlanf = 0;
1642         }
1643
1644         /* As the compl has been parsed, reset it; we won't touch it again */
1645         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1646
1647         queue_tail_inc(&rxo->cq);
1648         return rxcp;
1649 }
1650
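/* Completion-ring convention used by be_rx_compl_get() (and by
 * be_tx_compl_get() below): hw sets the valid bit last, so the driver
 * tests it first and only then issues rmb() before reading the rest
 * of the entry, ensuring no field is fetched ahead of the valid-bit
 * check.  Zeroing the valid bit after parsing makes the slot read as
 * empty the next time the ring wraps around to it.
 */
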
1651 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1652 {
1653         u32 order = get_order(size);
1654
1655         if (order > 0)
1656                 gfp |= __GFP_COMP;
1657         return alloc_pages(gfp, order);
1658 }
1659
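/* __GFP_COMP above turns a multi-order allocation into a compound
 * page, so the get_page()/put_page() references taken per
 * rx_frag_size chunk (here and later by the stack on the skb frags)
 * are all accounted against the page head.
 */
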
1660 /*
1661  * Allocate a page, split it to fragments of size rx_frag_size and post as
1662  * receive buffers to BE
1663  */
1664 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1665 {
1666         struct be_adapter *adapter = rxo->adapter;
1667         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1668         struct be_queue_info *rxq = &rxo->q;
1669         struct page *pagep = NULL;
1670         struct be_eth_rx_d *rxd;
1671         u64 page_dmaaddr = 0, frag_dmaaddr;
1672         u32 posted, page_offset = 0;
1673
1674         page_info = &rxo->page_info_tbl[rxq->head];
1675         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1676                 if (!pagep) {
1677                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1678                         if (unlikely(!pagep)) {
1679                                 rx_stats(rxo)->rx_post_fail++;
1680                                 break;
1681                         }
1682                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1683                                                     0, adapter->big_page_size,
1684                                                     DMA_FROM_DEVICE);
1685                         page_info->page_offset = 0;
1686                 } else {
1687                         get_page(pagep);
1688                         page_info->page_offset = page_offset + rx_frag_size;
1689                 }
1690                 page_offset = page_info->page_offset;
1691                 page_info->page = pagep;
1692                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1693                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1694
1695                 rxd = queue_head_node(rxq);
1696                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1697                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1698
1699                 /* Any space left in the current big page for another frag? */
1700                 if ((page_offset + rx_frag_size + rx_frag_size) >
1701                                         adapter->big_page_size) {
1702                         pagep = NULL;
1703                         page_info->last_page_user = true;
1704                 }
1705
1706                 prev_page_info = page_info;
1707                 queue_head_inc(rxq);
1708                 page_info = &rxo->page_info_tbl[rxq->head];
1709         }
1710         if (pagep)
1711                 prev_page_info->last_page_user = true;
1712
1713         if (posted) {
1714                 atomic_add(posted, &rxq->used);
1715                 be_rxq_notify(adapter, rxq->id, posted);
1716         } else if (atomic_read(&rxq->used) == 0) {
1717                 /* Let be_worker replenish when memory is available */
1718                 rxo->rx_post_starved = true;
1719         }
1720 }
1721
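/* Accounting sketch for be_post_rx_frags(): "posted" counts the
 * descriptors written in this call, while rxq->used tracks those
 * currently posted to the hardware; the doorbell write in
 * be_rxq_notify() tells the device how many new buffers were added.
 * If nothing could be posted and the ring is completely empty,
 * rx_post_starved is set so the periodic worker retries later.
 */
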
1722 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1723 {
1724         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1725
1726         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1727                 return NULL;
1728
1729         rmb();
1730         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1731
1732         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1733
1734         queue_tail_inc(tx_cq);
1735         return txcp;
1736 }
1737
1738 static u16 be_tx_compl_process(struct be_adapter *adapter,
1739                 struct be_tx_obj *txo, u16 last_index)
1740 {
1741         struct be_queue_info *txq = &txo->q;
1742         struct be_eth_wrb *wrb;
1743         struct sk_buff **sent_skbs = txo->sent_skb_list;
1744         struct sk_buff *sent_skb;
1745         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1746         bool unmap_skb_hdr = true;
1747
1748         sent_skb = sent_skbs[txq->tail];
1749         BUG_ON(!sent_skb);
1750         sent_skbs[txq->tail] = NULL;
1751
1752         /* skip header wrb */
1753         queue_tail_inc(txq);
1754
1755         do {
1756                 cur_index = txq->tail;
1757                 wrb = queue_tail_node(txq);
1758                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1759                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1760                 unmap_skb_hdr = false;
1761
1762                 num_wrbs++;
1763                 queue_tail_inc(txq);
1764         } while (cur_index != last_index);
1765
1766         kfree_skb(sent_skb);
1767         return num_wrbs;
1768 }
1769
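/* WRB accounting in be_tx_compl_process(): every tx packet consumes
 * one header wrb plus one wrb per mapped buffer, so num_wrbs starts
 * at 1 for the header and the tail is advanced until it matches the
 * wrb_index reported by the completion.  E.g. (illustrative) an skb
 * with a linear area and two page frags frees 1 + 3 = 4 wrbs.
 */
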
1770 /* Return the number of events in the event queue */
1771 static inline int events_get(struct be_eq_obj *eqo)
1772 {
1773         struct be_eq_entry *eqe;
1774         int num = 0;
1775
1776         do {
1777                 eqe = queue_tail_node(&eqo->q);
1778                 if (eqe->evt == 0)
1779                         break;
1780
1781                 rmb();
1782                 eqe->evt = 0;
1783                 num++;
1784                 queue_tail_inc(&eqo->q);
1785         } while (true);
1786
1787         return num;
1788 }
1789
1790 /* Leaves the EQ in a disarmed state */
1791 static void be_eq_clean(struct be_eq_obj *eqo)
1792 {
1793         int num = events_get(eqo);
1794
1795         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796 }
1797
1798 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1799 {
1800         struct be_rx_page_info *page_info;
1801         struct be_queue_info *rxq = &rxo->q;
1802         struct be_queue_info *rx_cq = &rxo->cq;
1803         struct be_rx_compl_info *rxcp;
1804         struct be_adapter *adapter = rxo->adapter;
1805         int flush_wait = 0;
1806         u16 tail;
1807
1808         /* Consume pending rx completions.
1809          * Wait for the flush completion (identified by zero num_rcvd)
1810          * to arrive. Notify the CQ even when there are no more CQ
1811          * entries, so that HW flushes its partially coalesced CQ entries.
1812          * In Lancer, there is no need to wait for flush compl.
1813          */
1814         for (;;) {
1815                 rxcp = be_rx_compl_get(rxo);
1816                 if (rxcp == NULL) {
1817                         if (lancer_chip(adapter))
1818                                 break;
1819
1820                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1821                                 dev_warn(&adapter->pdev->dev,
1822                                          "did not receive flush compl\n");
1823                                 break;
1824                         }
1825                         be_cq_notify(adapter, rx_cq->id, true, 0);
1826                         mdelay(1);
1827                 } else {
1828                         be_rx_compl_discard(rxo, rxcp);
1829                         be_cq_notify(adapter, rx_cq->id, false, 1);
1830                         if (rxcp->num_rcvd == 0)
1831                                 break;
1832                 }
1833         }
1834
1835         /* After cleanup, leave the CQ in unarmed state */
1836         be_cq_notify(adapter, rx_cq->id, false, 0);
1837
1838         /* Then free posted rx buffers that were not used */
1839         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1840         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1841                 page_info = get_rx_page_info(rxo, tail);
1842                 put_page(page_info->page);
1843                 memset(page_info, 0, sizeof(*page_info));
1844         }
1845         BUG_ON(atomic_read(&rxq->used));
1846         rxq->tail = rxq->head = 0;
1847 }
1848
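/* Ring arithmetic used in be_rx_cq_clean() above, with illustrative
 * numbers: head = 5, len = 1024 and used = 3 give
 *	tail = (5 + 1024 - 3) % 1024 = 2
 * i.e. entries 2, 3 and 4 hold the posted-but-unconsumed buffers
 * whose pages are released before the queue pointers are reset.
 */
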
1849 static void be_tx_compl_clean(struct be_adapter *adapter)
1850 {
1851         struct be_tx_obj *txo;
1852         struct be_queue_info *txq;
1853         struct be_eth_tx_compl *txcp;
1854         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1855         struct sk_buff *sent_skb;
1856         bool dummy_wrb;
1857         int i, pending_txqs;
1858
1859         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1860         do {
1861                 pending_txqs = adapter->num_tx_qs;
1862
1863                 for_all_tx_queues(adapter, txo, i) {
1864                         txq = &txo->q;
1865                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1866                                 end_idx =
1867                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1868                                                       wrb_index, txcp);
1869                                 num_wrbs += be_tx_compl_process(adapter, txo,
1870                                                                 end_idx);
1871                                 cmpl++;
1872                         }
1873                         if (cmpl) {
1874                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1875                                 atomic_sub(num_wrbs, &txq->used);
1876                                 cmpl = 0;
1877                                 num_wrbs = 0;
1878                         }
1879                         if (atomic_read(&txq->used) == 0)
1880                                 pending_txqs--;
1881                 }
1882
1883                 if (pending_txqs == 0 || ++timeo > 200)
1884                         break;
1885
1886                 mdelay(1);
1887         } while (true);
1888
1889         for_all_tx_queues(adapter, txo, i) {
1890                 txq = &txo->q;
1891                 if (atomic_read(&txq->used))
1892                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1893                                 atomic_read(&txq->used));
1894
1895                 /* free posted tx for which compls will never arrive */
1896                 while (atomic_read(&txq->used)) {
1897                         sent_skb = txo->sent_skb_list[txq->tail];
1898                         end_idx = txq->tail;
1899                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1900                                                    &dummy_wrb);
1901                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1902                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1903                         atomic_sub(num_wrbs, &txq->used);
1904                 }
1905         }
1906 }
1907
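/* If completions never arrive within the ~200ms window above (e.g.
 * after a hw error), the fallback loop reconstructs how many wrbs
 * each stuck skb consumed via wrb_cnt_for_skb() and frees them
 * directly, so tx skbs are not leaked when be_close() runs.
 */
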
1908 static void be_evt_queues_destroy(struct be_adapter *adapter)
1909 {
1910         struct be_eq_obj *eqo;
1911         int i;
1912
1913         for_all_evt_queues(adapter, eqo, i) {
1914                 if (eqo->q.created) {
1915                         be_eq_clean(eqo);
1916                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1917                 }
1918                 be_queue_free(adapter, &eqo->q);
1919         }
1920 }
1921
1922 static int be_evt_queues_create(struct be_adapter *adapter)
1923 {
1924         struct be_queue_info *eq;
1925         struct be_eq_obj *eqo;
1926         int i, rc;
1927
1928         adapter->num_evt_qs = num_irqs(adapter);
1929
1930         for_all_evt_queues(adapter, eqo, i) {
1931                 eqo->adapter = adapter;
1932                 eqo->tx_budget = BE_TX_BUDGET;
1933                 eqo->idx = i;
1934                 eqo->max_eqd = BE_MAX_EQD;
1935                 eqo->enable_aic = true;
1936
1937                 eq = &eqo->q;
1938                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1939                                         sizeof(struct be_eq_entry));
1940                 if (rc)
1941                         return rc;
1942
1943                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1944                 if (rc)
1945                         return rc;
1946         }
1947         return 0;
1948 }
1949
1950 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1951 {
1952         struct be_queue_info *q;
1953
1954         q = &adapter->mcc_obj.q;
1955         if (q->created)
1956                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1957         be_queue_free(adapter, q);
1958
1959         q = &adapter->mcc_obj.cq;
1960         if (q->created)
1961                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1962         be_queue_free(adapter, q);
1963 }
1964
1965 /* Must be called only after TX qs are created as MCC shares TX EQ */
1966 static int be_mcc_queues_create(struct be_adapter *adapter)
1967 {
1968         struct be_queue_info *q, *cq;
1969
1970         cq = &adapter->mcc_obj.cq;
1971         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1972                         sizeof(struct be_mcc_compl)))
1973                 goto err;
1974
1975         /* Use the default EQ for MCC completions */
1976         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1977                 goto mcc_cq_free;
1978
1979         q = &adapter->mcc_obj.q;
1980         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1981                 goto mcc_cq_destroy;
1982
1983         if (be_cmd_mccq_create(adapter, q, cq))
1984                 goto mcc_q_free;
1985
1986         return 0;
1987
1988 mcc_q_free:
1989         be_queue_free(adapter, q);
1990 mcc_cq_destroy:
1991         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1992 mcc_cq_free:
1993         be_queue_free(adapter, cq);
1994 err:
1995         return -1;
1996 }
1997
1998 static void be_tx_queues_destroy(struct be_adapter *adapter)
1999 {
2000         struct be_queue_info *q;
2001         struct be_tx_obj *txo;
2002         u8 i;
2003
2004         for_all_tx_queues(adapter, txo, i) {
2005                 q = &txo->q;
2006                 if (q->created)
2007                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2008                 be_queue_free(adapter, q);
2009
2010                 q = &txo->cq;
2011                 if (q->created)
2012                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2013                 be_queue_free(adapter, q);
2014         }
2015 }
2016
2017 static int be_num_txqs_want(struct be_adapter *adapter)
2018 {
2019         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2020             be_is_mc(adapter) ||
2021             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2022             BE2_chip(adapter))
2023                 return 1;
2024         else
2025                 return adapter->max_tx_queues;
2026 }
2027
2028 static int be_tx_cqs_create(struct be_adapter *adapter)
2029 {
2030         struct be_queue_info *cq, *eq;
2031         int status;
2032         struct be_tx_obj *txo;
2033         u8 i;
2034
2035         adapter->num_tx_qs = be_num_txqs_want(adapter);
2036         if (adapter->num_tx_qs != MAX_TX_QS) {
2037                 rtnl_lock();
2038                 netif_set_real_num_tx_queues(adapter->netdev,
2039                         adapter->num_tx_qs);
2040                 rtnl_unlock();
2041         }
2042
2043         for_all_tx_queues(adapter, txo, i) {
2044                 cq = &txo->cq;
2045                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046                                         sizeof(struct be_eth_tx_compl));
2047                 if (status)
2048                         return status;
2049
2050                 /* If num_evt_qs is less than num_tx_qs, then more than
2051                  * one txq shares an eq.
2052                  */
2053                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055                 if (status)
2056                         return status;
2057         }
2058         return 0;
2059 }
2060
2061 static int be_tx_qs_create(struct be_adapter *adapter)
2062 {
2063         struct be_tx_obj *txo;
2064         int i, status;
2065
2066         for_all_tx_queues(adapter, txo, i) {
2067                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2068                                         sizeof(struct be_eth_wrb));
2069                 if (status)
2070                         return status;
2071
2072                 status = be_cmd_txq_create(adapter, txo);
2073                 if (status)
2074                         return status;
2075         }
2076
2077         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2078                  adapter->num_tx_qs);
2079         return 0;
2080 }
2081
2082 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2083 {
2084         struct be_queue_info *q;
2085         struct be_rx_obj *rxo;
2086         int i;
2087
2088         for_all_rx_queues(adapter, rxo, i) {
2089                 q = &rxo->cq;
2090                 if (q->created)
2091                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2092                 be_queue_free(adapter, q);
2093         }
2094 }
2095
2096 static int be_rx_cqs_create(struct be_adapter *adapter)
2097 {
2098         struct be_queue_info *eq, *cq;
2099         struct be_rx_obj *rxo;
2100         int rc, i;
2101
2102         /* We'll create as many RSS rings as there are irqs.
2103          * But when there's only one irq there's no use creating RSS rings.
2104          */
2105         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2106                                 num_irqs(adapter) + 1 : 1;
2107         if (adapter->num_rx_qs != MAX_RX_QS) {
2108                 rtnl_lock();
2109                 netif_set_real_num_rx_queues(adapter->netdev,
2110                                              adapter->num_rx_qs);
2111                 rtnl_unlock();
2112         }
2113
2114         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2115         for_all_rx_queues(adapter, rxo, i) {
2116                 rxo->adapter = adapter;
2117                 cq = &rxo->cq;
2118                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2119                                 sizeof(struct be_eth_rx_compl));
2120                 if (rc)
2121                         return rc;
2122
2123                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2124                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2125                 if (rc)
2126                         return rc;
2127         }
2128
2129         dev_info(&adapter->pdev->dev,
2130                  "created %d RSS queue(s) and 1 default RX queue\n",
2131                  adapter->num_rx_qs - 1);
2132         return 0;
2133 }
2134
2135 static irqreturn_t be_intx(int irq, void *dev)
2136 {
2137         struct be_eq_obj *eqo = dev;
2138         struct be_adapter *adapter = eqo->adapter;
2139         int num_evts = 0;
2140
2141         /* IRQ is not expected when NAPI is scheduled as the EQ
2142          * will not be armed.
2143          * But, this can happen on Lancer INTx where it takes
2144          * a while to de-assert INTx or in BE2 where occasionally
2145          * an interrupt may be raised even when EQ is unarmed.
2146          * If NAPI is already scheduled, then counting & notifying
2147          * events will orphan them.
2148          */
2149         if (napi_schedule_prep(&eqo->napi)) {
2150                 num_evts = events_get(eqo);
2151                 __napi_schedule(&eqo->napi);
2152                 if (num_evts)
2153                         eqo->spurious_intr = 0;
2154         }
2155         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2156
2157         /* Return IRQ_HANDLED only for the first spurious intr
2158          * after a valid intr to stop the kernel from branding
2159          * this irq as a bad one!
2160          */
2161         if (num_evts || eqo->spurious_intr++ == 0)
2162                 return IRQ_HANDLED;
2163         else
2164                 return IRQ_NONE;
2165 }
2166
2167 static irqreturn_t be_msix(int irq, void *dev)
2168 {
2169         struct be_eq_obj *eqo = dev;
2170
2171         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2172         napi_schedule(&eqo->napi);
2173         return IRQ_HANDLED;
2174 }
2175
2176 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2177 {
2178         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2179 }
2180
2181 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2182                         int budget)
2183 {
2184         struct be_adapter *adapter = rxo->adapter;
2185         struct be_queue_info *rx_cq = &rxo->cq;
2186         struct be_rx_compl_info *rxcp;
2187         u32 work_done;
2188
2189         for (work_done = 0; work_done < budget; work_done++) {
2190                 rxcp = be_rx_compl_get(rxo);
2191                 if (!rxcp)
2192                         break;
2193
2194                 /* Is it a flush compl that has no data? */
2195                 if (unlikely(rxcp->num_rcvd == 0))
2196                         goto loop_continue;
2197
2198                 /* Discard compls with a partial DMA (seen on Lancer B0) */
2199                 if (unlikely(!rxcp->pkt_size)) {
2200                         be_rx_compl_discard(rxo, rxcp);
2201                         goto loop_continue;
2202                 }
2203
2204                 /* On BE drop pkts that arrive due to imperfect filtering in
2205                  * promiscuous mode on some SKUs.
2206                  */
2207                 if (unlikely(rxcp->port != adapter->port_num &&
2208                                 !lancer_chip(adapter))) {
2209                         be_rx_compl_discard(rxo, rxcp);
2210                         goto loop_continue;
2211                 }
2212
2213                 if (do_gro(rxcp))
2214                         be_rx_compl_process_gro(rxo, napi, rxcp);
2215                 else
2216                         be_rx_compl_process(rxo, rxcp);
2217 loop_continue:
2218                 be_rx_stats_update(rxo, rxcp);
2219         }
2220
2221         if (work_done) {
2222                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2223
2224                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2225                         be_post_rx_frags(rxo, GFP_ATOMIC);
2226         }
2227
2228         return work_done;
2229 }
2230
2231 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2232                           int budget, int idx)
2233 {
2234         struct be_eth_tx_compl *txcp;
2235         int num_wrbs = 0, work_done;
2236
2237         for (work_done = 0; work_done < budget; work_done++) {
2238                 txcp = be_tx_compl_get(&txo->cq);
2239                 if (!txcp)
2240                         break;
2241                 num_wrbs += be_tx_compl_process(adapter, txo,
2242                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2243                                         wrb_index, txcp));
2244         }
2245
2246         if (work_done) {
2247                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2248                 atomic_sub(num_wrbs, &txo->q.used);
2249
2250                 /* As Tx wrbs have been freed up, wake up netdev queue
2251                  * if it was stopped due to lack of tx wrbs. */
2252                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2253                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2254                         netif_wake_subqueue(adapter->netdev, idx);
2255                 }
2256
2257                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2258                 tx_stats(txo)->tx_compl += work_done;
2259                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2260         }
2261         return (work_done < budget); /* Done */
2262 }
2263
2264 int be_poll(struct napi_struct *napi, int budget)
2265 {
2266         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2267         struct be_adapter *adapter = eqo->adapter;
2268         int max_work = 0, work, i, num_evts;
2269         bool tx_done;
2270
2271         num_evts = events_get(eqo);
2272
2273         /* Process all TXQs serviced by this EQ */
2274         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2275                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2276                                         eqo->tx_budget, i);
2277                 if (!tx_done)
2278                         max_work = budget;
2279         }
2280
2281         /* This loop will iterate twice for EQ0 in which
2282          * completions of the last RXQ (default one) are also processed.
2283          * For other EQs the loop iterates only once.
2284          */
2285         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2286                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2287                 max_work = max(work, max_work);
2288         }
2289
2290         if (is_mcc_eqo(eqo))
2291                 be_process_mcc(adapter);
2292
2293         if (max_work < budget) {
2294                 napi_complete(napi);
2295                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2296         } else {
2297                 /* As we'll continue in polling mode, count and clear events */
2298                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2299         }
2300         return max_work;
2301 }
2302
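/* Queue-to-EQ striping used by be_poll(), with illustrative counts:
 * i starts at eqo->idx and steps by num_evt_qs, so with 4 EQs and
 * 8 TXQs the EQ at idx 0 services TXQs 0 and 4, idx 1 services 1 and
 * 5, and so on.  RXQs are striped the same way, which is why EQ0
 * iterates twice and additionally picks up the last (default) RXQ.
 */
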
2303 void be_detect_error(struct be_adapter *adapter)
2304 {
2305         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2306         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2307         u32 i;
2308
2309         if (be_hw_error(adapter))
2310                 return;
2311
2312         if (lancer_chip(adapter)) {
2313                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2314                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2315                         sliport_err1 = ioread32(adapter->db +
2316                                         SLIPORT_ERROR1_OFFSET);
2317                         sliport_err2 = ioread32(adapter->db +
2318                                         SLIPORT_ERROR2_OFFSET);
2319                 }
2320         } else {
2321                 pci_read_config_dword(adapter->pdev,
2322                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2323                 pci_read_config_dword(adapter->pdev,
2324                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2325                 pci_read_config_dword(adapter->pdev,
2326                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2327                 pci_read_config_dword(adapter->pdev,
2328                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2329
2330                 ue_lo = (ue_lo & ~ue_lo_mask);
2331                 ue_hi = (ue_hi & ~ue_hi_mask);
2332         }
2333
2334         /* On certain platforms BE hardware can indicate spurious UEs.
2335          * Allow the h/w to stop working completely in case of a real UE.
2336          * Hence hw_error is not set on UE detection.
2337          */
2338         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2339                 adapter->hw_error = true;
2340                 dev_err(&adapter->pdev->dev,
2341                         "Error detected in the card\n");
2342         }
2343
2344         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2345                 dev_err(&adapter->pdev->dev,
2346                         "ERR: sliport status 0x%x\n", sliport_status);
2347                 dev_err(&adapter->pdev->dev,
2348                         "ERR: sliport error1 0x%x\n", sliport_err1);
2349                 dev_err(&adapter->pdev->dev,
2350                         "ERR: sliport error2 0x%x\n", sliport_err2);
2351         }
2352
2353         if (ue_lo) {
2354                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2355                         if (ue_lo & 1)
2356                                 dev_err(&adapter->pdev->dev,
2357                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2358                 }
2359         }
2360
2361         if (ue_hi) {
2362                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2363                         if (ue_hi & 1)
2364                                 dev_err(&adapter->pdev->dev,
2365                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2366                 }
2367         }
2368
2369 }
2370
2371 static void be_msix_disable(struct be_adapter *adapter)
2372 {
2373         if (msix_enabled(adapter)) {
2374                 pci_disable_msix(adapter->pdev);
2375                 adapter->num_msix_vec = 0;
2376         }
2377 }
2378
2379 static uint be_num_rss_want(struct be_adapter *adapter)
2380 {
2381         u32 num = 0;
2382
2383         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2384             (lancer_chip(adapter) ||
2385              (!sriov_want(adapter) && be_physfn(adapter)))) {
2386                 num = adapter->max_rss_queues;
2387                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2388         }
2389         return num;
2390 }
2391
2392 static int be_msix_enable(struct be_adapter *adapter)
2393 {
2394 #define BE_MIN_MSIX_VECTORS             1
2395         int i, status, num_vec, num_roce_vec = 0;
2396         struct device *dev = &adapter->pdev->dev;
2397
2398         /* If RSS queues are not used, need a vec for default RX Q */
2399         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2400         if (be_roce_supported(adapter)) {
2401                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2402                                         (num_online_cpus() + 1));
2403                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2404                 num_vec += num_roce_vec;
2405                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2406         }
2407         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2408
2409         for (i = 0; i < num_vec; i++)
2410                 adapter->msix_entries[i].entry = i;
2411
2412         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2413         if (status == 0) {
2414                 goto done;
2415         } else if (status >= BE_MIN_MSIX_VECTORS) {
2416                 num_vec = status;
2417                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2418                                          num_vec);
2419                 if (!status)
2420                         goto done;
2421         }
2422
2423         dev_warn(dev, "MSIx enable failed\n");
2424         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2425         if (!be_physfn(adapter))
2426                 return status;
2427         return 0;
2428 done:
2429         if (be_roce_supported(adapter)) {
2430                 if (num_vec > num_roce_vec) {
2431                         adapter->num_msix_vec = num_vec - num_roce_vec;
2432                         adapter->num_msix_roce_vec =
2433                                 num_vec - adapter->num_msix_vec;
2434                 } else {
2435                         adapter->num_msix_vec = num_vec;
2436                         adapter->num_msix_roce_vec = 0;
2437                 }
2438         } else
2439                 adapter->num_msix_vec = num_vec;
2440         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2441         return 0;
2442 }
2443
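/* be_msix_enable() relies on the classic pci_enable_msix() contract:
 * 0 means all requested vectors were allocated, while a positive
 * return is the number of vectors the platform could support, so the
 * driver retries once with that smaller count before giving up and
 * (on a PF) falling back to INTx.
 */
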
2444 static inline int be_msix_vec_get(struct be_adapter *adapter,
2445                                 struct be_eq_obj *eqo)
2446 {
2447         return adapter->msix_entries[eqo->idx].vector;
2448 }
2449
2450 static int be_msix_register(struct be_adapter *adapter)
2451 {
2452         struct net_device *netdev = adapter->netdev;
2453         struct be_eq_obj *eqo;
2454         int status, i, vec;
2455
2456         for_all_evt_queues(adapter, eqo, i) {
2457                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2458                 vec = be_msix_vec_get(adapter, eqo);
2459                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2460                 if (status)
2461                         goto err_msix;
2462         }
2463
2464         return 0;
2465 err_msix:
2466         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2467                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2468         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2469                 status);
2470         be_msix_disable(adapter);
2471         return status;
2472 }
2473
2474 static int be_irq_register(struct be_adapter *adapter)
2475 {
2476         struct net_device *netdev = adapter->netdev;
2477         int status;
2478
2479         if (msix_enabled(adapter)) {
2480                 status = be_msix_register(adapter);
2481                 if (status == 0)
2482                         goto done;
2483                 /* INTx is not supported for VF */
2484                 if (!be_physfn(adapter))
2485                         return status;
2486         }
2487
2488         /* INTx: only the first EQ is used */
2489         netdev->irq = adapter->pdev->irq;
2490         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2491                              &adapter->eq_obj[0]);
2492         if (status) {
2493                 dev_err(&adapter->pdev->dev,
2494                         "INTx request IRQ failed - err %d\n", status);
2495                 return status;
2496         }
2497 done:
2498         adapter->isr_registered = true;
2499         return 0;
2500 }
2501
2502 static void be_irq_unregister(struct be_adapter *adapter)
2503 {
2504         struct net_device *netdev = adapter->netdev;
2505         struct be_eq_obj *eqo;
2506         int i;
2507
2508         if (!adapter->isr_registered)
2509                 return;
2510
2511         /* INTx */
2512         if (!msix_enabled(adapter)) {
2513                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2514                 goto done;
2515         }
2516
2517         /* MSIx */
2518         for_all_evt_queues(adapter, eqo, i)
2519                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2520
2521 done:
2522         adapter->isr_registered = false;
2523 }
2524
2525 static void be_rx_qs_destroy(struct be_adapter *adapter)
2526 {
2527         struct be_queue_info *q;
2528         struct be_rx_obj *rxo;
2529         int i;
2530
2531         for_all_rx_queues(adapter, rxo, i) {
2532                 q = &rxo->q;
2533                 if (q->created) {
2534                         be_cmd_rxq_destroy(adapter, q);
2535                         be_rx_cq_clean(rxo);
2536                 }
2537                 be_queue_free(adapter, q);
2538         }
2539 }
2540
2541 static int be_close(struct net_device *netdev)
2542 {
2543         struct be_adapter *adapter = netdev_priv(netdev);
2544         struct be_eq_obj *eqo;
2545         int i;
2546
2547         be_roce_dev_close(adapter);
2548
2549         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2550                 for_all_evt_queues(adapter, eqo, i)
2551                         napi_disable(&eqo->napi);
2552                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2553         }
2554
2555         be_async_mcc_disable(adapter);
2556
2557         /* Wait for all pending tx completions to arrive so that
2558          * all tx skbs are freed.
2559          */
2560         be_tx_compl_clean(adapter);
2561         netif_tx_disable(netdev);
2562
2563         be_rx_qs_destroy(adapter);
2564
2565         for_all_evt_queues(adapter, eqo, i) {
2566                 if (msix_enabled(adapter))
2567                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2568                 else
2569                         synchronize_irq(netdev->irq);
2570                 be_eq_clean(eqo);
2571         }
2572
2573         be_irq_unregister(adapter);
2574
2575         return 0;
2576 }
2577
2578 static int be_rx_qs_create(struct be_adapter *adapter)
2579 {
2580         struct be_rx_obj *rxo;
2581         int rc, i, j;
2582         u8 rsstable[128];
2583
2584         for_all_rx_queues(adapter, rxo, i) {
2585                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2586                                     sizeof(struct be_eth_rx_d));
2587                 if (rc)
2588                         return rc;
2589         }
2590
2591         /* The FW would like the default RXQ to be created first */
2592         rxo = default_rxo(adapter);
2593         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2594                                adapter->if_handle, false, &rxo->rss_id);
2595         if (rc)
2596                 return rc;
2597
2598         for_all_rss_queues(adapter, rxo, i) {
2599                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2600                                        rx_frag_size, adapter->if_handle,
2601                                        true, &rxo->rss_id);
2602                 if (rc)
2603                         return rc;
2604         }
2605
2606         if (be_multi_rxq(adapter)) {
2607                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2608                         for_all_rss_queues(adapter, rxo, i) {
2609                                 if ((j + i) >= 128)
2610                                         break;
2611                                 rsstable[j + i] = rxo->rss_id;
2612                         }
2613                 }
2614                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2615                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2616
2617                 if (!BEx_chip(adapter))
2618                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2619                                                 RSS_ENABLE_UDP_IPV6;
2620
2621                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2622                                        128);
2623                 if (rc) {
2624                         adapter->rss_flags = 0;
2625                         return rc;
2626                 }
2627         }
2628
2629         /* First time posting */
2630         for_all_rx_queues(adapter, rxo, i)
2631                 be_post_rx_frags(rxo, GFP_KERNEL);
2632         return 0;
2633 }
2634
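/* RSS indirection sketch for be_rx_qs_create(), with illustrative
 * counts: given 4 RSS rings whose rss_ids are A, B, C and D, the
 * 128-entry table is filled round-robin as A, B, C, D, A, B, ... so
 * hashed flows are spread evenly; such traffic never lands on the
 * default (non-RSS) RXQ.
 */
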
2635 static int be_open(struct net_device *netdev)
2636 {
2637         struct be_adapter *adapter = netdev_priv(netdev);
2638         struct be_eq_obj *eqo;
2639         struct be_rx_obj *rxo;
2640         struct be_tx_obj *txo;
2641         u8 link_status;
2642         int status, i;
2643
2644         status = be_rx_qs_create(adapter);
2645         if (status)
2646                 goto err;
2647
2648         status = be_irq_register(adapter);
2649         if (status)
2650                 goto err;
2651
2652         for_all_rx_queues(adapter, rxo, i)
2653                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2654
2655         for_all_tx_queues(adapter, txo, i)
2656                 be_cq_notify(adapter, txo->cq.id, true, 0);
2657
2658         be_async_mcc_enable(adapter);
2659
2660         for_all_evt_queues(adapter, eqo, i) {
2661                 napi_enable(&eqo->napi);
2662                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2663         }
2664         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2665
2666         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2667         if (!status)
2668                 be_link_status_update(adapter, link_status);
2669
2670         netif_tx_start_all_queues(netdev);
2671         be_roce_dev_open(adapter);
2672         return 0;
2673 err:
2674         be_close(adapter->netdev);
2675         return -EIO;
2676 }
2677
2678 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2679 {
2680         struct be_dma_mem cmd;
2681         int status = 0;
2682         u8 mac[ETH_ALEN];
2683
2684         memset(mac, 0, ETH_ALEN);
2685
2686         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2687         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2688                                     GFP_KERNEL | __GFP_ZERO);
2689         if (cmd.va == NULL)
2690                 return -ENOMEM;
2691
2692         if (enable) {
2693                 status = pci_write_config_dword(adapter->pdev,
2694                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2695                 if (status) {
2696                         dev_err(&adapter->pdev->dev,
2697                                 "Could not enable Wake-on-lan\n");
2698                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2699                                           cmd.dma);
2700                         return status;
2701                 }
2702                 status = be_cmd_enable_magic_wol(adapter,
2703                                 adapter->netdev->dev_addr, &cmd);
2704                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2705                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2706         } else {
2707                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2708                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2709                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2710         }
2711
2712         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2713         return status;
2714 }
2715
2716 /*
2717  * Generate a seed MAC address from the PF MAC address using jhash.
2718  * MAC addresses for VFs are assigned incrementally starting from the seed.
2719  * These addresses are programmed in the ASIC by the PF and the VF driver
2720  * queries for the MAC address during its probe.
2721  */
2722 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2723 {
2724         u32 vf;
2725         int status = 0;
2726         u8 mac[ETH_ALEN];
2727         struct be_vf_cfg *vf_cfg;
2728
2729         be_vf_eth_addr_generate(adapter, mac);
2730
2731         for_all_vfs(adapter, vf_cfg, vf) {
2732                 if (lancer_chip(adapter)) {
2733                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2734                 } else {
2735                         status = be_cmd_pmac_add(adapter, mac,
2736                                                  vf_cfg->if_handle,
2737                                                  &vf_cfg->pmac_id, vf + 1);
2738                 }
2739
2740                 if (status)
2741                         dev_err(&adapter->pdev->dev,
2742                         "MAC address assignment failed for VF %d\n", vf);
2743                 else
2744                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2745
2746                 mac[5] += 1;
2747         }
2748         return status;
2749 }
2750
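/* Example of the incremental assignment above (illustrative seed):
 * if the jhash-derived seed ends in ...:10, VF0 is given ...:10,
 * VF1 gets ...:11 and so on, since only mac[5] is bumped per VF.
 * On Lancer the address is pushed via be_cmd_set_mac_list(); on
 * other chips it is added as a pmac on the VF's interface.
 */
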
2751 static int be_vfs_mac_query(struct be_adapter *adapter)
2752 {
2753         int status, vf;
2754         u8 mac[ETH_ALEN];
2755         struct be_vf_cfg *vf_cfg;
2756         bool active;
2757
2758         for_all_vfs(adapter, vf_cfg, vf) {
2759                 be_cmd_get_mac_from_list(adapter, mac, &active,
2760                                          &vf_cfg->pmac_id, 0);
2761
2762                 status = be_cmd_mac_addr_query(adapter, mac, false,
2763                                                vf_cfg->if_handle, 0);
2764                 if (status)
2765                         return status;
2766                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2767         }
2768         return 0;
2769 }
2770
2771 static void be_vf_clear(struct be_adapter *adapter)
2772 {
2773         struct be_vf_cfg *vf_cfg;
2774         u32 vf;
2775
2776         if (pci_vfs_assigned(adapter->pdev)) {
2777                 dev_warn(&adapter->pdev->dev,
2778                          "VFs are assigned to VMs: not disabling VFs\n");
2779                 goto done;
2780         }
2781
2782         pci_disable_sriov(adapter->pdev);
2783
2784         for_all_vfs(adapter, vf_cfg, vf) {
2785                 if (lancer_chip(adapter))
2786                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2787                 else
2788                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2789                                         vf_cfg->pmac_id, vf + 1);
2790
2791                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2792         }
2793 done:
2794         kfree(adapter->vf_cfg);
2795         adapter->num_vfs = 0;
2796 }
2797
2798 static int be_clear(struct be_adapter *adapter)
2799 {
2800         int i = 1;
2801
2802         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2803                 cancel_delayed_work_sync(&adapter->work);
2804                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2805         }
2806
2807         if (sriov_enabled(adapter))
2808                 be_vf_clear(adapter);
2809
2810         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2811                 be_cmd_pmac_del(adapter, adapter->if_handle,
2812                         adapter->pmac_id[i], 0);
2813
2814         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2815
2816         be_mcc_queues_destroy(adapter);
2817         be_rx_cqs_destroy(adapter);
2818         be_tx_queues_destroy(adapter);
2819         be_evt_queues_destroy(adapter);
2820
2821         kfree(adapter->pmac_id);
2822         adapter->pmac_id = NULL;
2823
2824         be_msix_disable(adapter);
2825         return 0;
2826 }
2827
2828 static int be_vfs_if_create(struct be_adapter *adapter)
2829 {
2830         struct be_vf_cfg *vf_cfg;
2831         u32 cap_flags, en_flags, vf;
2832         int status;
2833
2834         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2835                     BE_IF_FLAGS_MULTICAST;
2836
2837         for_all_vfs(adapter, vf_cfg, vf) {
2838                 if (!BE3_chip(adapter))
2839                         be_cmd_get_profile_config(adapter, &cap_flags,
2840                                                   NULL, vf + 1);
2841
2842                 /* If a FW profile exists, then cap_flags are updated */
2843                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2844                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2845                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2846                                           &vf_cfg->if_handle, vf + 1);
2847                 if (status)
2848                         goto err;
2849         }
2850 err:
2851         return status;
2852 }
2853
2854 static int be_vf_setup_init(struct be_adapter *adapter)
2855 {
2856         struct be_vf_cfg *vf_cfg;
2857         int vf;
2858
2859         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2860                                   GFP_KERNEL);
2861         if (!adapter->vf_cfg)
2862                 return -ENOMEM;
2863
2864         for_all_vfs(adapter, vf_cfg, vf) {
2865                 vf_cfg->if_handle = -1;
2866                 vf_cfg->pmac_id = -1;
2867         }
2868         return 0;
2869 }
2870
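/* Provision VFs from the PF: create (or rediscover) a vNIC interface per
 * VF, assign or query its MAC address, apply per-VF QoS/link/hsw settings
 * and, when not already done, enable SR-IOV at the PCI level.
 */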
2871 static int be_vf_setup(struct be_adapter *adapter)
2872 {
2873         struct be_vf_cfg *vf_cfg;
2874         u16 def_vlan, lnk_speed;
2875         int status, old_vfs, vf;
2876         struct device *dev = &adapter->pdev->dev;
2877
2878         old_vfs = pci_num_vf(adapter->pdev);
2879         if (old_vfs) {
2880                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2881                 if (old_vfs != num_vfs)
2882                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2883                 adapter->num_vfs = old_vfs;
2884         } else {
2885                 if (num_vfs > adapter->dev_num_vfs)
2886                         dev_info(dev, "Device supports %d VFs and not %d\n",
2887                                  adapter->dev_num_vfs, num_vfs);
2888                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2889                 if (!adapter->num_vfs)
2890                         return 0;
2891         }
2892
2893         status = be_vf_setup_init(adapter);
2894         if (status)
2895                 goto err;
2896
2897         if (old_vfs) {
2898                 for_all_vfs(adapter, vf_cfg, vf) {
2899                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2900                         if (status)
2901                                 goto err;
2902                 }
2903         } else {
2904                 status = be_vfs_if_create(adapter);
2905                 if (status)
2906                         goto err;
2907         }
2908
2909         if (old_vfs) {
2910                 status = be_vfs_mac_query(adapter);
2911                 if (status)
2912                         goto err;
2913         } else {
2914                 status = be_vf_eth_addr_config(adapter);
2915                 if (status)
2916                         goto err;
2917         }
2918
2919         for_all_vfs(adapter, vf_cfg, vf) {
2920                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2921                  * Allow the full available bandwidth.
2922                  */
2923                 if (BE3_chip(adapter) && !old_vfs)
2924                         be_cmd_set_qos(adapter, 1000, vf + 1);
2925
2926                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2927                                                   NULL, vf + 1);
2928                 if (!status)
2929                         vf_cfg->tx_rate = lnk_speed;
2930
2931                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2932                                                vf + 1, vf_cfg->if_handle);
2933                 if (status)
2934                         goto err;
2935                 vf_cfg->def_vid = def_vlan;
2936
2937                 be_cmd_enable_vf(adapter, vf + 1);
2938         }
2939
2940         if (!old_vfs) {
2941                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2942                 if (status) {
2943                         dev_err(dev, "SRIOV enable failed\n");
2944                         adapter->num_vfs = 0;
2945                         goto err;
2946                 }
2947         }
2948         return 0;
2949 err:
2950         dev_err(dev, "VF setup failed\n");
2951         be_vf_clear(adapter);
2952         return status;
2953 }
2954
2955 static void be_setup_init(struct be_adapter *adapter)
2956 {
2957         adapter->vlan_prio_bmap = 0xff;
2958         adapter->phy.link_speed = -1;
2959         adapter->if_handle = -1;
2960         adapter->be3_native = false;
2961         adapter->promiscuous = false;
2962         if (be_physfn(adapter))
2963                 adapter->cmd_privileges = MAX_PRIVILEGES;
2964         else
2965                 adapter->cmd_privileges = MIN_PRIVILEGES;
2966 }
2967
2968 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2969                            bool *active_mac, u32 *pmac_id)
2970 {
2971         int status = 0;
2972
2973         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2974                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2975                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2976                         *active_mac = true;
2977                 else
2978                         *active_mac = false;
2979
2980                 return status;
2981         }
2982
2983         if (lancer_chip(adapter)) {
2984                 status = be_cmd_get_mac_from_list(adapter, mac,
2985                                                   active_mac, pmac_id, 0);
2986                 if (*active_mac) {
2987                         status = be_cmd_mac_addr_query(adapter, mac, false,
2988                                                        if_handle, *pmac_id);
2989                 }
2990         } else if (be_physfn(adapter)) {
2991                 /* For BE3, for PF get permanent MAC */
2992                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2993                 *active_mac = false;
2994         } else {
2995                 /* For BE3, for VF get soft MAC assigned by PF */
2996                 status = be_cmd_mac_addr_query(adapter, mac, false,
2997                                                if_handle, 0);
2998                 *active_mac = true;
2999         }
3000         return status;
3001 }
3002
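/* Discover per-function resource limits: from the FW config/profile on
 * Lancer and Skyhawk, or from chip-family defaults on BE2/BE3.
 */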
3003 static void be_get_resources(struct be_adapter *adapter)
3004 {
3005         u16 dev_num_vfs;
3006         int pos, status;
3007         bool profile_present = false;
3008         u16 txq_count = 0;
3009
3010         if (!BEx_chip(adapter)) {
3011                 status = be_cmd_get_func_config(adapter);
3012                 if (!status)
3013                         profile_present = true;
3014         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3015                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3016         }
3017
3018         if (profile_present) {
3019                 /* Sanity fixes for Lancer */
3020                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3021                                               BE_UC_PMAC_COUNT);
3022                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3023                                            BE_NUM_VLANS_SUPPORTED);
3024                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3025                                                BE_MAX_MC);
3026                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3027                                                MAX_TX_QS);
3028                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3029                                                 BE3_MAX_RSS_QS);
3030                 adapter->max_event_queues = min_t(u16,
3031                                                   adapter->max_event_queues,
3032                                                   BE3_MAX_RSS_QS);
3033
3034                 if (adapter->max_rss_queues &&
3035                     adapter->max_rss_queues == adapter->max_rx_queues)
3036                         adapter->max_rss_queues -= 1;
3037
3038                 if (adapter->max_event_queues < adapter->max_rss_queues)
3039                         adapter->max_rss_queues = adapter->max_event_queues;
3040
3041         } else {
3042                 if (be_physfn(adapter))
3043                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3044                 else
3045                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3046
3047                 if (adapter->function_mode & FLEX10_MODE)
3048                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3049                 else
3050                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3051
3052                 adapter->max_mcast_mac = BE_MAX_MC;
3053                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3054                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3055                                                MAX_TX_QS);
3056                 adapter->max_rss_queues = (adapter->be3_native) ?
3057                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3058                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3059
3060                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3061                                         BE_IF_FLAGS_BROADCAST |
3062                                         BE_IF_FLAGS_MULTICAST |
3063                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3064                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3065                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3066                                         BE_IF_FLAGS_PROMISCUOUS;
3067
3068                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3069                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3070         }
3071
3072         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3073         if (pos) {
3074                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3075                                      &dev_num_vfs);
3076                 if (BE3_chip(adapter))
3077                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3078                 adapter->dev_num_vfs = dev_num_vfs;
3079         }
3080 }
3081
3082 /* Routine to query per function resource limits */
3083 static int be_get_config(struct be_adapter *adapter)
3084 {
3085         int status;
3086
3087         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3088                                      &adapter->function_mode,
3089                                      &adapter->function_caps,
3090                                      &adapter->asic_rev);
3091         if (status)
3092                 goto err;
3093
3094         be_get_resources(adapter);
3095
3096         /* primary mac needs 1 pmac entry */
3097         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3098                                    sizeof(u32), GFP_KERNEL);
3099         if (!adapter->pmac_id) {
3100                 status = -ENOMEM;
3101                 goto err;
3102         }
3103
3104 err:
3105         return status;
3106 }
3107
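/* Main (re)initialization path: query the adapter config, create the
 * event/TX/RX/MCC queues and the default vNIC interface, program the
 * MAC, then kick off VF setup and the periodic worker.
 */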
3108 static int be_setup(struct be_adapter *adapter)
3109 {
3110         struct device *dev = &adapter->pdev->dev;
3111         u32 en_flags;
3112         u32 tx_fc, rx_fc;
3113         int status;
3114         u8 mac[ETH_ALEN];
3115         bool active_mac;
3116
3117         be_setup_init(adapter);
3118
3119         if (!lancer_chip(adapter))
3120                 be_cmd_req_native_mode(adapter);
3121
3122         status = be_get_config(adapter);
3123         if (status)
3124                 goto err;
3125
3126         status = be_msix_enable(adapter);
3127         if (status)
3128                 goto err;
3129
3130         status = be_evt_queues_create(adapter);
3131         if (status)
3132                 goto err;
3133
3134         status = be_tx_cqs_create(adapter);
3135         if (status)
3136                 goto err;
3137
3138         status = be_rx_cqs_create(adapter);
3139         if (status)
3140                 goto err;
3141
3142         status = be_mcc_queues_create(adapter);
3143         if (status)
3144                 goto err;
3145
3146         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3147         /* In UMC mode the FW does not return the right privileges.
3148          * Override with the correct privilege equivalent to a PF.
3149          */
3150         if (be_is_mc(adapter))
3151                 adapter->cmd_privileges = MAX_PRIVILEGES;
3152
3153         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3154                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3155
3156         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3157                 en_flags |= BE_IF_FLAGS_RSS;
3158
3159         en_flags &= adapter->if_cap_flags;
3160
3161         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3162                                   &adapter->if_handle, 0);
3163         if (status != 0)
3164                 goto err;
3165
3166         memset(mac, 0, ETH_ALEN);
3167         active_mac = false;
3168         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3169                                  &active_mac, &adapter->pmac_id[0]);
3170         if (status != 0)
3171                 goto err;
3172
3173         if (!active_mac) {
3174                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3175                                          &adapter->pmac_id[0], 0);
3176                 if (status != 0)
3177                         goto err;
3178         }
3179
3180         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3181                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3182                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3183         }
3184
3185         status = be_tx_qs_create(adapter);
3186         if (status)
3187                 goto err;
3188
3189         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3190
3191         if (adapter->vlans_added)
3192                 be_vid_config(adapter);
3193
3194         be_set_rx_mode(adapter->netdev);
3195
3196         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3197
3198         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3199                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3200                                         adapter->rx_fc);
3201
3202         if (be_physfn(adapter)) {
3203                 if (adapter->dev_num_vfs)
3204                         be_vf_setup(adapter);
3205                 else
3206                         dev_warn(dev, "device doesn't support SRIOV\n");
3207         }
3208
3209         status = be_cmd_get_phy_info(adapter);
3210         if (!status && be_pause_supported(adapter))
3211                 adapter->phy.fc_autoneg = 1;
3212
3213         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3214         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3215         return 0;
3216 err:
3217         be_clear(adapter);
3218         return status;
3219 }
3220
3221 #ifdef CONFIG_NET_POLL_CONTROLLER
3222 static void be_netpoll(struct net_device *netdev)
3223 {
3224         struct be_adapter *adapter = netdev_priv(netdev);
3225         struct be_eq_obj *eqo;
3226         int i;
3227
3228         for_all_evt_queues(adapter, eqo, i) {
3229                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3230                 napi_schedule(&eqo->napi);
3231         }
3234 }
3235 #endif
3236
3237 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3238 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3239
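/* Compare the CRC of the redboot image inside the UFI file with the CRC
 * of the image currently on flash; reflashing is needed only when the
 * two differ.
 */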
3240 static bool be_flash_redboot(struct be_adapter *adapter,
3241                         const u8 *p, u32 img_start, int image_size,
3242                         int hdr_size)
3243 {
3244         u32 crc_offset;
3245         u8 flashed_crc[4];
3246         int status;
3247
3248         crc_offset = hdr_size + img_start + image_size - 4;
3249
3250         p += crc_offset;
3251
3252         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3253                         (image_size - 4));
3254         if (status) {
3255                 dev_err(&adapter->pdev->dev,
3256                 "could not get crc from flash, not flashing redboot\n");
3257                 return false;
3258         }
3259
3260         /* Update redboot only if the CRC does not match */
3261         if (!memcmp(flashed_crc, p, 4))
3262                 return false;
3263         else
3264                 return true;
3265 }
3266
3267 static bool phy_flashing_required(struct be_adapter *adapter)
3268 {
3269         return (adapter->phy.phy_type == TN_8022 &&
3270                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3271 }
3272
3273 static bool is_comp_in_ufi(struct be_adapter *adapter,
3274                            struct flash_section_info *fsec, int type)
3275 {
3276         int i = 0, img_type = 0;
3277         struct flash_section_info_g2 *fsec_g2 = NULL;
3278
3279         if (BE2_chip(adapter))
3280                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3281
3282         for (i = 0; i < MAX_FLASH_COMP; i++) {
3283                 if (fsec_g2)
3284                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3285                 else
3286                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3287
3288                 if (img_type == type)
3289                         return true;
3290         }
3291         return false;
3293 }
3294
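/* Scan the UFI image past the headers in 32-byte steps, looking for the
 * flash cookie that marks the start of the flash section directory.
 */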
3295 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3296                                                 int header_size,
3297                                                 const struct firmware *fw)
3298 {
3299         struct flash_section_info *fsec = NULL;
3300         const u8 *p = fw->data;
3301
3302         p += header_size;
3303         while (p < (fw->data + fw->size)) {
3304                 fsec = (struct flash_section_info *)p;
3305                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3306                         return fsec;
3307                 p += 32;
3308         }
3309         return NULL;
3310 }
3311
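/* Push one image to flash in 32KB chunks: intermediate chunks use a SAVE
 * op; the final chunk issues the FLASH op that commits the image.
 */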
3312 static int be_flash(struct be_adapter *adapter, const u8 *img,
3313                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3314 {
3315         u32 total_bytes = 0, flash_op, num_bytes = 0;
3316         int status = 0;
3317         struct be_cmd_write_flashrom *req = flash_cmd->va;
3318
3319         total_bytes = img_size;
3320         while (total_bytes) {
3321                 num_bytes = min_t(u32, 32*1024, total_bytes);
3322
3323                 total_bytes -= num_bytes;
3324
3325                 if (!total_bytes) {
3326                         if (optype == OPTYPE_PHY_FW)
3327                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3328                         else
3329                                 flash_op = FLASHROM_OPER_FLASH;
3330                 } else {
3331                         if (optype == OPTYPE_PHY_FW)
3332                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3333                         else
3334                                 flash_op = FLASHROM_OPER_SAVE;
3335                 }
3336
3337                 memcpy(req->data_buf, img, num_bytes);
3338                 img += num_bytes;
3339                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3340                                                 flash_op, num_bytes);
3341                 if (status) {
3342                         if (status == ILLEGAL_IOCTL_REQ &&
3343                             optype == OPTYPE_PHY_FW)
3344                                 break;
3345                         dev_err(&adapter->pdev->dev,
3346                                 "cmd to write to flash rom failed.\n");
3347                         return status;
3348                 }
3349         }
3350         return 0;
3351 }
3352
3353 /* For BE2, BE3 and BE3-R */
3354 static int be_flash_BEx(struct be_adapter *adapter,
3355                          const struct firmware *fw,
3356                          struct be_dma_mem *flash_cmd,
3357                          int num_of_images)
3359 {
3360         int status = 0, i, filehdr_size = 0;
3361         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3362         const u8 *p = fw->data;
3363         const struct flash_comp *pflashcomp;
3364         int num_comp, redboot;
3365         struct flash_section_info *fsec = NULL;
3366
3367         struct flash_comp gen3_flash_types[] = {
3368                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3369                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3370                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3371                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3372                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3373                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3374                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3375                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3376                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3377                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3378                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3379                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3380                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3381                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3382                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3383                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3384                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3385                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3386                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3387                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3388         };
3389
3390         struct flash_comp gen2_flash_types[] = {
3391                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3392                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3393                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3394                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3395                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3396                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3397                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3398                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3399                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3400                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3401                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3402                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3403                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3404                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3405                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3406                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3407         };
3408
3409         if (BE3_chip(adapter)) {
3410                 pflashcomp = gen3_flash_types;
3411                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3412                 num_comp = ARRAY_SIZE(gen3_flash_types);
3413         } else {
3414                 pflashcomp = gen2_flash_types;
3415                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3416                 num_comp = ARRAY_SIZE(gen2_flash_types);
3417         }
3418
3419         /* Get flash section info */
3420         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3421         if (!fsec) {
3422                 dev_err(&adapter->pdev->dev,
3423                         "Invalid Cookie. UFI corrupted?\n");
3424                 return -1;
3425         }
3426         for (i = 0; i < num_comp; i++) {
3427                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3428                         continue;
3429
3430                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3431                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3432                         continue;
3433
3434                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3435                     !phy_flashing_required(adapter))
3436                         continue;
3437
3438                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3439                         redboot = be_flash_redboot(adapter, fw->data,
3440                                 pflashcomp[i].offset, pflashcomp[i].size,
3441                                 filehdr_size + img_hdrs_size);
3442                         if (!redboot)
3443                                 continue;
3444                 }
3445
3446                 p = fw->data;
3447                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3448                 if (p + pflashcomp[i].size > fw->data + fw->size)
3449                         return -1;
3450
3451                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3452                                         pflashcomp[i].size);
3453                 if (status) {
3454                         dev_err(&adapter->pdev->dev,
3455                                 "Flashing section type %d failed.\n",
3456                                 pflashcomp[i].img_type);
3457                         return status;
3458                 }
3459         }
3460         return 0;
3461 }
3462
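/* For Skyhawk: walk the UFI's flash section directory and flash each
 * component, mapping its image type to the matching flashrom op-type.
 */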
3463 static int be_flash_skyhawk(struct be_adapter *adapter,
3464                 const struct firmware *fw,
3465                 struct be_dma_mem *flash_cmd, int num_of_images)
3466 {
3467         int status = 0, i, filehdr_size = 0;
3468         int img_offset, img_size, img_optype, redboot;
3469         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3470         const u8 *p = fw->data;
3471         struct flash_section_info *fsec = NULL;
3472
3473         filehdr_size = sizeof(struct flash_file_hdr_g3);
3474         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3475         if (!fsec) {
3476                 dev_err(&adapter->pdev->dev,
3477                         "Invalid Cookie. UFI corrupted?\n");
3478                 return -1;
3479         }
3480
3481         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3482                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3483                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3484
3485                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3486                 case IMAGE_FIRMWARE_iSCSI:
3487                         img_optype = OPTYPE_ISCSI_ACTIVE;
3488                         break;
3489                 case IMAGE_BOOT_CODE:
3490                         img_optype = OPTYPE_REDBOOT;
3491                         break;
3492                 case IMAGE_OPTION_ROM_ISCSI:
3493                         img_optype = OPTYPE_BIOS;
3494                         break;
3495                 case IMAGE_OPTION_ROM_PXE:
3496                         img_optype = OPTYPE_PXE_BIOS;
3497                         break;
3498                 case IMAGE_OPTION_ROM_FCoE:
3499                         img_optype = OPTYPE_FCOE_BIOS;
3500                         break;
3501                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3502                         img_optype = OPTYPE_ISCSI_BACKUP;
3503                         break;
3504                 case IMAGE_NCSI:
3505                         img_optype = OPTYPE_NCSI_FW;
3506                         break;
3507                 default:
3508                         continue;
3509                 }
3510
3511                 if (img_optype == OPTYPE_REDBOOT) {
3512                         redboot = be_flash_redboot(adapter, fw->data,
3513                                         img_offset, img_size,
3514                                         filehdr_size + img_hdrs_size);
3515                         if (!redboot)
3516                                 continue;
3517                 }
3518
3519                 p = fw->data;
3520                 p += filehdr_size + img_offset + img_hdrs_size;
3521                 if (p + img_size > fw->data + fw->size)
3522                         return -1;
3523
3524                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3525                 if (status) {
3526                         dev_err(&adapter->pdev->dev,
3527                                 "Flashing section type %d failed.\n",
3528                                 le32_to_cpu(fsec->fsec_entry[i].type));
3529                         return status;
3530                 }
3531         }
3532         return 0;
3533 }
3534
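/* Lancer FW download uses the write_object command: the image is streamed
 * to /prg in 32KB chunks and committed with a zero-length write, which
 * may then require an FW reset to activate.
 */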
3535 static int lancer_fw_download(struct be_adapter *adapter,
3536                                 const struct firmware *fw)
3537 {
3538 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3539 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3540         struct be_dma_mem flash_cmd;
3541         const u8 *data_ptr = NULL;
3542         u8 *dest_image_ptr = NULL;
3543         size_t image_size = 0;
3544         u32 chunk_size = 0;
3545         u32 data_written = 0;
3546         u32 offset = 0;
3547         int status = 0;
3548         u8 add_status = 0;
3549         u8 change_status;
3550
3551         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3552                 dev_err(&adapter->pdev->dev,
3553                         "FW image not properly aligned. "
3554                         "Length must be 4-byte aligned.\n");
3555                 status = -EINVAL;
3556                 goto lancer_fw_exit;
3557         }
3558
3559         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3560                                 + LANCER_FW_DOWNLOAD_CHUNK;
3561         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3562                                           &flash_cmd.dma, GFP_KERNEL);
3563         if (!flash_cmd.va) {
3564                 status = -ENOMEM;
3565                 goto lancer_fw_exit;
3566         }
3567
3568         dest_image_ptr = flash_cmd.va +
3569                                 sizeof(struct lancer_cmd_req_write_object);
3570         image_size = fw->size;
3571         data_ptr = fw->data;
3572
3573         while (image_size) {
3574                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3575
3576                 /* Copy the image chunk content. */
3577                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3578
3579                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3580                                                  chunk_size, offset,
3581                                                  LANCER_FW_DOWNLOAD_LOCATION,
3582                                                  &data_written, &change_status,
3583                                                  &add_status);
3584                 if (status)
3585                         break;
3586
3587                 offset += data_written;
3588                 data_ptr += data_written;
3589                 image_size -= data_written;
3590         }
3591
3592         if (!status) {
3593                 /* Commit the FW written */
3594                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3595                                                  0, offset,
3596                                                  LANCER_FW_DOWNLOAD_LOCATION,
3597                                                  &data_written, &change_status,
3598                                                  &add_status);
3599         }
3600
3601         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3602                                 flash_cmd.dma);
3603         if (status) {
3604                 dev_err(&adapter->pdev->dev,
3605                         "Firmware load error. "
3606                         "Status code: 0x%x Additional Status: 0x%x\n",
3607                         status, add_status);
3608                 goto lancer_fw_exit;
3609         }
3610
3611         if (change_status == LANCER_FW_RESET_NEEDED) {
3612                 status = lancer_physdev_ctrl(adapter,
3613                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3614                 if (status) {
3615                         dev_err(&adapter->pdev->dev,
3616                                 "Adapter busy for FW reset.\n"
3617                                 "New FW will not be active.\n");
3618                         goto lancer_fw_exit;
3619                 }
3620         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3621                 dev_err(&adapter->pdev->dev,
3622                         "System reboot required for new FW"
3623                         " to be active\n");
3624         }
3625
3626         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3627 lancer_fw_exit:
3628         return status;
3629 }
3630
3631 #define UFI_TYPE2               2
3632 #define UFI_TYPE3               3
3633 #define UFI_TYPE3R              10
3634 #define UFI_TYPE4               4
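/* Infer the UFI image type from the file header's build and asic-rev
 * fields, so an image meant for a different chip family is rejected
 * before flashing.
 */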
3635 static int be_get_ufi_type(struct be_adapter *adapter,
3636                            struct flash_file_hdr_g3 *fhdr)
3637 {
3638         if (fhdr == NULL)
3639                 goto be_get_ufi_exit;
3640
3641         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3642                 return UFI_TYPE4;
3643         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3644                 if (fhdr->asic_type_rev == 0x10)
3645                         return UFI_TYPE3R;
3646                 else
3647                         return UFI_TYPE3;
3648         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3649                 return UFI_TYPE2;
3650
3651 be_get_ufi_exit:
3652         dev_err(&adapter->pdev->dev,
3653                 "UFI and Interface are not compatible for flashing\n");
3654         return -1;
3655 }
3656
3657 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3658 {
3659         struct flash_file_hdr_g3 *fhdr3;
3660         struct image_hdr *img_hdr_ptr = NULL;
3661         struct be_dma_mem flash_cmd;
3662         const u8 *p;
3663         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3664
3665         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3666         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3667                                           &flash_cmd.dma, GFP_KERNEL);
3668         if (!flash_cmd.va) {
3669                 status = -ENOMEM;
3670                 goto be_fw_exit;
3671         }
3672
3673         p = fw->data;
3674         fhdr3 = (struct flash_file_hdr_g3 *)p;
3675
3676         ufi_type = be_get_ufi_type(adapter, fhdr3);
3677
3678         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3679         for (i = 0; i < num_imgs; i++) {
3680                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3681                                 (sizeof(struct flash_file_hdr_g3) +
3682                                  i * sizeof(struct image_hdr)));
3683                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3684                         switch (ufi_type) {
3685                         case UFI_TYPE4:
3686                                 status = be_flash_skyhawk(adapter, fw,
3687                                                         &flash_cmd, num_imgs);
3688                                 break;
3689                         case UFI_TYPE3R:
3690                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3691                                                       num_imgs);
3692                                 break;
3693                         case UFI_TYPE3:
3694                                 /* Do not flash this ufi on BE3-R cards */
3695                                 if (adapter->asic_rev < 0x10)
3696                                         status = be_flash_BEx(adapter, fw,
3697                                                               &flash_cmd,
3698                                                               num_imgs);
3699                                 else {
3700                                         status = -1;
3701                                         dev_err(&adapter->pdev->dev,
3702                                                 "Can't load BE3 UFI on BE3R\n");
3703                                 }
3704                         }
3705                 }
3706         }
3707
3708         if (ufi_type == UFI_TYPE2)
3709                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3710         else if (ufi_type == -1)
3711                 status = -1;
3712
3713         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3714                           flash_cmd.dma);
3715         if (status) {
3716                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3717                 goto be_fw_exit;
3718         }
3719
3720         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3721
3722 be_fw_exit:
3723         return status;
3724 }
3725
3726 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3727 {
3728         const struct firmware *fw;
3729         int status;
3730
3731         if (!netif_running(adapter->netdev)) {
3732                 dev_err(&adapter->pdev->dev,
3733                         "Firmware load not allowed (interface is down)\n");
3734                 return -1;
3735         }
3736
3737         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3738         if (status)
3739                 goto fw_exit;
3740
3741         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3742
3743         if (lancer_chip(adapter))
3744                 status = lancer_fw_download(adapter, fw);
3745         else
3746                 status = be_fw_download(adapter, fw);
3747
3748         if (!status)
3749                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3750                                   adapter->fw_on_flash);
3751
3752 fw_exit:
3753         release_firmware(fw);
3754         return status;
3755 }
3756
3757 static const struct net_device_ops be_netdev_ops = {
3758         .ndo_open               = be_open,
3759         .ndo_stop               = be_close,
3760         .ndo_start_xmit         = be_xmit,
3761         .ndo_set_rx_mode        = be_set_rx_mode,
3762         .ndo_set_mac_address    = be_mac_addr_set,
3763         .ndo_change_mtu         = be_change_mtu,
3764         .ndo_get_stats64        = be_get_stats64,
3765         .ndo_validate_addr      = eth_validate_addr,
3766         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3767         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3768         .ndo_set_vf_mac         = be_set_vf_mac,
3769         .ndo_set_vf_vlan        = be_set_vf_vlan,
3770         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3771         .ndo_get_vf_config      = be_get_vf_config,
3772 #ifdef CONFIG_NET_POLL_CONTROLLER
3773         .ndo_poll_controller    = be_netpoll,
3774 #endif
3775 };
3776
3777 static void be_netdev_init(struct net_device *netdev)
3778 {
3779         struct be_adapter *adapter = netdev_priv(netdev);
3780         struct be_eq_obj *eqo;
3781         int i;
3782
3783         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3784                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3785                 NETIF_F_HW_VLAN_CTAG_TX;
3786         if (be_multi_rxq(adapter))
3787                 netdev->hw_features |= NETIF_F_RXHASH;
3788
3789         netdev->features |= netdev->hw_features |
3790                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3791
3792         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3793                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3794
3795         netdev->priv_flags |= IFF_UNICAST_FLT;
3796
3797         netdev->flags |= IFF_MULTICAST;
3798
3799         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3800
3801         netdev->netdev_ops = &be_netdev_ops;
3802
3803         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3804
3805         for_all_evt_queues(adapter, eqo, i)
3806                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3807 }
3808
3809 static void be_unmap_pci_bars(struct be_adapter *adapter)
3810 {
3811         if (adapter->csr)
3812                 pci_iounmap(adapter->pdev, adapter->csr);
3813         if (adapter->db)
3814                 pci_iounmap(adapter->pdev, adapter->db);
3815 }
3816
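/* Doorbell registers are in BAR 0 on Lancer and on VFs, BAR 4 otherwise */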
3817 static int db_bar(struct be_adapter *adapter)
3818 {
3819         if (lancer_chip(adapter) || !be_physfn(adapter))
3820                 return 0;
3821         else
3822                 return 4;
3823 }
3824
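/* On Skyhawk, record the doorbell BAR region for use by the RoCE driver */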
3825 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3826 {
3827         if (skyhawk_chip(adapter)) {
3828                 adapter->roce_db.size = 4096;
3829                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3830                                                               db_bar(adapter));
3831                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3832                                                                db_bar(adapter));
3833         }
3834         return 0;
3835 }
3836
3837 static int be_map_pci_bars(struct be_adapter *adapter)
3838 {
3839         u8 __iomem *addr;
3840         u32 sli_intf;
3841
3842         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3843         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3844                                 SLI_INTF_IF_TYPE_SHIFT;
3845
3846         if (BEx_chip(adapter) && be_physfn(adapter)) {
3847                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3848                 if (adapter->csr == NULL)
3849                         return -ENOMEM;
3850         }
3851
3852         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3853         if (addr == NULL)
3854                 goto pci_map_err;
3855         adapter->db = addr;
3856
3857         be_roce_map_pci_bars(adapter);
3858         return 0;
3859
3860 pci_map_err:
3861         be_unmap_pci_bars(adapter);
3862         return -ENOMEM;
3863 }
3864
3865 static void be_ctrl_cleanup(struct be_adapter *adapter)
3866 {
3867         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3868
3869         be_unmap_pci_bars(adapter);
3870
3871         if (mem->va)
3872                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3873                                   mem->dma);
3874
3875         mem = &adapter->rx_filter;
3876         if (mem->va)
3877                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3878                                   mem->dma);
3879 }
3880
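/* Map BARs and set up control structures. The mailbox buffer is
 * over-allocated by 16 bytes so that the va/dma handed to the HW can be
 * aligned to a 16-byte boundary.
 */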
3881 static int be_ctrl_init(struct be_adapter *adapter)
3882 {
3883         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3884         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3885         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3886         u32 sli_intf;
3887         int status;
3888
3889         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3890         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3891                                  SLI_INTF_FAMILY_SHIFT;
3892         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3893
3894         status = be_map_pci_bars(adapter);
3895         if (status)
3896                 goto done;
3897
3898         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3899         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3900                                                 mbox_mem_alloc->size,
3901                                                 &mbox_mem_alloc->dma,
3902                                                 GFP_KERNEL);
3903         if (!mbox_mem_alloc->va) {
3904                 status = -ENOMEM;
3905                 goto unmap_pci_bars;
3906         }
3907         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3908         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3909         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3910         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3911
3912         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3913         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3914                                            &rx_filter->dma,
3915                                            GFP_KERNEL | __GFP_ZERO);
3916         if (rx_filter->va == NULL) {
3917                 status = -ENOMEM;
3918                 goto free_mbox;
3919         }
3920
3921         mutex_init(&adapter->mbox_lock);
3922         spin_lock_init(&adapter->mcc_lock);
3923         spin_lock_init(&adapter->mcc_cq_lock);
3924
3925         init_completion(&adapter->flash_compl);
3926         pci_save_state(adapter->pdev);
3927         return 0;
3928
3929 free_mbox:
3930         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3931                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3932
3933 unmap_pci_bars:
3934         be_unmap_pci_bars(adapter);
3935
3936 done:
3937         return status;
3938 }
3939
3940 static void be_stats_cleanup(struct be_adapter *adapter)
3941 {
3942         struct be_dma_mem *cmd = &adapter->stats_cmd;
3943
3944         if (cmd->va)
3945                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3946                                   cmd->va, cmd->dma);
3947 }
3948
3949 static int be_stats_init(struct be_adapter *adapter)
3950 {
3951         struct be_dma_mem *cmd = &adapter->stats_cmd;
3952
3953         if (lancer_chip(adapter))
3954                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3955         else if (BE2_chip(adapter))
3956                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3957         else
3958                 /* BE3 and Skyhawk */
3959                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3960
3961         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3962                                      GFP_KERNEL | __GFP_ZERO);
3963         if (cmd->va == NULL)
3964                 return -ENOMEM;
3965         return 0;
3966 }
3967
3968 static void be_remove(struct pci_dev *pdev)
3969 {
3970         struct be_adapter *adapter = pci_get_drvdata(pdev);
3971
3972         if (!adapter)
3973                 return;
3974
3975         be_roce_dev_remove(adapter);
3976         be_intr_set(adapter, false);
3977
3978         cancel_delayed_work_sync(&adapter->func_recovery_work);
3979
3980         unregister_netdev(adapter->netdev);
3981
3982         be_clear(adapter);
3983
3984         /* tell fw we're done with firing cmds */
3985         be_cmd_fw_clean(adapter);
3986
3987         be_stats_cleanup(adapter);
3988
3989         be_ctrl_cleanup(adapter);
3990
3991         pci_disable_pcie_error_reporting(pdev);
3992
3993         pci_set_drvdata(pdev, NULL);
3994         pci_release_regions(pdev);
3995         pci_disable_device(pdev);
3996
3997         free_netdev(adapter->netdev);
3998 }
3999
4000 bool be_is_wol_supported(struct be_adapter *adapter)
4001 {
4002         return (adapter->wol_cap & BE_WOL_CAP) &&
4003                !be_is_wol_excluded(adapter);
4004 }
4005
4006 u32 be_get_fw_log_level(struct be_adapter *adapter)
4007 {
4008         struct be_dma_mem extfat_cmd;
4009         struct be_fat_conf_params *cfgs;
4010         int status;
4011         u32 level = 0;
4012         int j;
4013
4014         if (lancer_chip(adapter))
4015                 return 0;
4016
4017         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4018         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4019         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4020                                              &extfat_cmd.dma);
4021
4022         if (!extfat_cmd.va) {
4023                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4024                         __func__);
4025                 goto err;
4026         }
4027
4028         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4029         if (!status) {
4030                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4031                                                 sizeof(struct be_cmd_resp_hdr));
4032                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4033                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4034                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4035                 }
4036         }
4037         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4038                             extfat_cmd.dma);
4039 err:
4040         return level;
4041 }
4042
4043 static int be_get_initial_config(struct be_adapter *adapter)
4044 {
4045         int status;
4046         u32 level;
4047
4048         status = be_cmd_get_cntl_attributes(adapter);
4049         if (status)
4050                 return status;
4051
4052         status = be_cmd_get_acpi_wol_cap(adapter);
4053         if (status) {
4054                 /* In case of a failure to get WOL capabilities,
4055                  * check the exclusion list to determine WOL capability */
4056                 if (!be_is_wol_excluded(adapter))
4057                         adapter->wol_cap |= BE_WOL_CAP;
4058         }
4059
4060         if (be_is_wol_supported(adapter))
4061                 adapter->wol = true;
4062
4063         /* Must be a power of 2 or else MODULO will BUG_ON */
4064         adapter->be_get_temp_freq = 64;
4065
4066         level = be_get_fw_log_level(adapter);
4067         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4068
4069         return 0;
4070 }
4071
4072 static int lancer_recover_func(struct be_adapter *adapter)
4073 {
4074         struct device *dev = &adapter->pdev->dev;
4075         int status;
4076
4077         status = lancer_test_and_set_rdy_state(adapter);
4078         if (status)
4079                 goto err;
4080
4081         if (netif_running(adapter->netdev))
4082                 be_close(adapter->netdev);
4083
4084         be_clear(adapter);
4085
4086         be_clear_all_error(adapter);
4087
4088         status = be_setup(adapter);
4089         if (status)
4090                 goto err;
4091
4092         if (netif_running(adapter->netdev)) {
4093                 status = be_open(adapter->netdev);
4094                 if (status)
4095                         goto err;
4096         }
4097
4098         dev_info(dev, "Error recovery successful\n");
4099         return 0;
4100 err:
4101         if (status == -EAGAIN)
4102                 dev_err(dev, "Waiting for resource provisioning\n");
4103         else
4104                 dev_err(dev, "Error recovery failed\n");
4105
4106         return status;
4107 }
4108
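/* Scheduled every second to detect HW errors; on Lancer it also drives
 * the function-level recovery sequence.
 */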
4109 static void be_func_recovery_task(struct work_struct *work)
4110 {
4111         struct be_adapter *adapter =
4112                 container_of(work, struct be_adapter, func_recovery_work.work);
4113         int status = 0;
4114
4115         be_detect_error(adapter);
4116
4117         if (adapter->hw_error && lancer_chip(adapter)) {
4118
4119                 rtnl_lock();
4120                 netif_device_detach(adapter->netdev);
4121                 rtnl_unlock();
4122
4123                 status = lancer_recover_func(adapter);
4124                 if (!status)
4125                         netif_device_attach(adapter->netdev);
4126         }
4127
4128         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4129          * no need to attempt further recovery.
4130          */
4131         if (!status || status == -EAGAIN)
4132                 schedule_delayed_work(&adapter->func_recovery_work,
4133                                       msecs_to_jiffies(1000));
4134 }
4135
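/* Periodic (1s) housekeeping: reap MCC completions, refresh stats and die
 * temperature, replenish starved RX queues and update EQ delays.
 */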
4136 static void be_worker(struct work_struct *work)
4137 {
4138         struct be_adapter *adapter =
4139                 container_of(work, struct be_adapter, work.work);
4140         struct be_rx_obj *rxo;
4141         struct be_eq_obj *eqo;
4142         int i;
4143
4144         /* When interrupts are not yet enabled, just reap any pending
4145          * mcc completions */
4146         if (!netif_running(adapter->netdev)) {
4147                 local_bh_disable();
4148                 be_process_mcc(adapter);
4149                 local_bh_enable();
4150                 goto reschedule;
4151         }
4152
4153         if (!adapter->stats_cmd_sent) {
4154                 if (lancer_chip(adapter))
4155                         lancer_cmd_get_pport_stats(adapter,
4156                                                 &adapter->stats_cmd);
4157                 else
4158                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4159         }
4160
4161         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4162                 be_cmd_get_die_temperature(adapter);
4163
4164         for_all_rx_queues(adapter, rxo, i) {
4165                 if (rxo->rx_post_starved) {
4166                         rxo->rx_post_starved = false;
4167                         be_post_rx_frags(rxo, GFP_KERNEL);
4168                 }
4169         }
4170
4171         for_all_evt_queues(adapter, eqo, i)
4172                 be_eqd_update(adapter, eqo);
4173
4174 reschedule:
4175         adapter->work_counter++;
4176         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4177 }
4178
4179 /* If any VFs are already enabled, don't FLR the PF */
4180 static bool be_reset_required(struct be_adapter *adapter)
4181 {
4182         return !pci_num_vf(adapter->pdev);
4183 }
4184
4185 static char *mc_name(struct be_adapter *adapter)
4186 {
4187         if (adapter->function_mode & FLEX10_MODE)
4188                 return "FLEX10";
4189         else if (adapter->function_mode & VNIC_MODE)
4190                 return "vNIC";
4191         else if (adapter->function_mode & UMC_ENABLED)
4192                 return "UMC";
4193         else
4194                 return "";
4195 }
4196
4197 static inline char *func_name(struct be_adapter *adapter)
4198 {
4199         return be_physfn(adapter) ? "PF" : "VF";
4200 }
4201
4202 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4203 {
4204         int status = 0;
4205         struct be_adapter *adapter;
4206         struct net_device *netdev;
4207         char port_name;
4208
4209         status = pci_enable_device(pdev);
4210         if (status)
4211                 goto do_none;
4212
4213         status = pci_request_regions(pdev, DRV_NAME);
4214         if (status)
4215                 goto disable_dev;
4216         pci_set_master(pdev);
4217
4218         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4219         if (netdev == NULL) {
4220                 status = -ENOMEM;
4221                 goto rel_reg;
4222         }
4223         adapter = netdev_priv(netdev);
4224         adapter->pdev = pdev;
4225         pci_set_drvdata(pdev, adapter);
4226         adapter->netdev = netdev;
4227         SET_NETDEV_DEV(netdev, &pdev->dev);
4228
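        /* Prefer a 64-bit DMA mask and fall back to 32-bit if the
         * platform cannot provide it.
         */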
4229         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4230         if (!status) {
4231                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4232                 if (status < 0) {
4233                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4234                         goto free_netdev;
4235                 }
4236                 netdev->features |= NETIF_F_HIGHDMA;
4237         } else {
4238                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4239                 if (!status)
4240                         status = dma_set_coherent_mask(&pdev->dev,
4241                                                        DMA_BIT_MASK(32));
4242                 if (status) {
4243                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4244                         goto free_netdev;
4245                 }
4246         }
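        /* Note: where the kernel provides it, the 64-bit-then-32-bit
         * fallback above can be collapsed with dma_set_mask_and_coherent().
         * A minimal sketch, assuming the helper is available; it omits the
         * NETIF_F_HIGHDMA bookkeeping and is not this driver's code:
         *
         *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
         *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
         *		goto free_netdev;
         */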
4247
4248         status = pci_enable_pcie_error_reporting(pdev);
4249         if (status)
4250                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4251
4252         status = be_ctrl_init(adapter);
4253         if (status)
4254                 goto free_netdev;
4255
4256         /* sync up with fw's ready state */
4257         if (be_physfn(adapter)) {
4258                 status = be_fw_wait_ready(adapter);
4259                 if (status)
4260                         goto ctrl_clean;
4261         }
4262
4263         if (be_reset_required(adapter)) {
4264                 status = be_cmd_reset_function(adapter);
4265                 if (status)
4266                         goto ctrl_clean;
4267
4268                 /* Wait for interrupts to quiesce after an FLR */
4269                 msleep(100);
4270         }
4271
4272         /* Allow interrupts for other ULPs running on the NIC function */
4273         be_intr_set(adapter, true);
4274
4275         /* tell fw we're ready to fire cmds */
4276         status = be_cmd_fw_init(adapter);
4277         if (status)
4278                 goto ctrl_clean;
4279
4280         status = be_stats_init(adapter);
4281         if (status)
4282                 goto ctrl_clean;
4283
4284         status = be_get_initial_config(adapter);
4285         if (status)
4286                 goto stats_clean;
4287
4288         INIT_DELAYED_WORK(&adapter->work, be_worker);
4289         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4290         adapter->rx_fc = adapter->tx_fc = true;
4291
4292         status = be_setup(adapter);
4293         if (status)
4294                 goto stats_clean;
4295
4296         be_netdev_init(netdev);
4297         status = register_netdev(netdev);
4298         if (status)
4299                 goto unsetup;
4300
4301         be_roce_dev_add(adapter);
4302
4303         schedule_delayed_work(&adapter->func_recovery_work,
4304                               msecs_to_jiffies(1000));
4305
4306         be_cmd_query_port_name(adapter, &port_name);
4307
4308         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4309                  func_name(adapter), mc_name(adapter), port_name);
4310
4311         return 0;
4312
4313 unsetup:
4314         be_clear(adapter);
4315 stats_clean:
4316         be_stats_cleanup(adapter);
4317 ctrl_clean:
4318         be_ctrl_cleanup(adapter);
4319 free_netdev:
4320         free_netdev(netdev);
4321         pci_set_drvdata(pdev, NULL);
4322 rel_reg:
4323         pci_release_regions(pdev);
4324 disable_dev:
4325         pci_disable_device(pdev);
4326 do_none:
4327         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4328         return status;
4329 }
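/* The labels above unwind only the steps that had succeeded, in reverse
 * order of setup (be_setup -> stats -> ctrl -> netdev -> regions ->
 * device): the usual kernel goto-based error-handling pattern.
 */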
4330
4331 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4332 {
4333         struct be_adapter *adapter = pci_get_drvdata(pdev);
4334         struct net_device *netdev = adapter->netdev;
4335
4336         if (adapter->wol)
4337                 be_setup_wol(adapter, true);
4338
4339         cancel_delayed_work_sync(&adapter->func_recovery_work);
4340
4341         netif_device_detach(netdev);
4342         if (netif_running(netdev)) {
4343                 rtnl_lock();
4344                 be_close(netdev);
4345                 rtnl_unlock();
4346         }
4347         be_clear(adapter);
4348
4349         pci_save_state(pdev);
4350         pci_disable_device(pdev);
4351         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4352         return 0;
4353 }
4354
4355 static int be_resume(struct pci_dev *pdev)
4356 {
4357         int status = 0;
4358         struct be_adapter *adapter = pci_get_drvdata(pdev);
4359         struct net_device *netdev = adapter->netdev;
4360
4361         netif_device_detach(netdev);
4362
4363         status = pci_enable_device(pdev);
4364         if (status)
4365                 return status;
4366
4367         pci_set_power_state(pdev, PCI_D0);
4368         pci_restore_state(pdev);
4369
4370         /* tell fw we're ready to fire cmds */
4371         status = be_cmd_fw_init(adapter);
4372         if (status)
4373                 return status;
4374
4375         be_setup(adapter);
4376         if (netif_running(netdev)) {
4377                 rtnl_lock();
4378                 be_open(netdev);
4379                 rtnl_unlock();
4380         }
4381
4382         schedule_delayed_work(&adapter->func_recovery_work,
4383                               msecs_to_jiffies(1000));
4384         netif_device_attach(netdev);
4385
4386         if (adapter->wol)
4387                 be_setup_wol(adapter, false);
4388
4389         return 0;
4390 }
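/* be_suspend()/be_resume() are the legacy pci_driver power-management
 * hooks. A minimal sketch of a dev_pm_ops conversion, assuming
 * hypothetical be_pm_suspend()/be_pm_resume() wrappers that take a
 * struct device (not this driver's code):
 *
 *	static SIMPLE_DEV_PM_OPS(be_pm_ops, be_pm_suspend, be_pm_resume);
 *
 * which would then be wired up via be_driver.driver.pm = &be_pm_ops.
 */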
4391
4392 /*
4393  * The FLR issued below stops BE from DMAing any further data.
4394  */
4395 static void be_shutdown(struct pci_dev *pdev)
4396 {
4397         struct be_adapter *adapter = pci_get_drvdata(pdev);
4398
4399         if (!adapter)
4400                 return;
4401
4402         cancel_delayed_work_sync(&adapter->work);
4403         cancel_delayed_work_sync(&adapter->func_recovery_work);
4404
4405         netif_device_detach(adapter->netdev);
4406
4407         be_cmd_reset_function(adapter);
4408
4409         pci_disable_device(pdev);
4410 }
4411
4412 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4413                                 pci_channel_state_t state)
4414 {
4415         struct be_adapter *adapter = pci_get_drvdata(pdev);
4416         struct net_device *netdev = adapter->netdev;
4417
4418         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4419
4420         if (!adapter->eeh_error) {
4421                 adapter->eeh_error = true;
4422
4423                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4424
4425                 rtnl_lock();
4426                 netif_device_detach(netdev);
4427                 if (netif_running(netdev))
4428                         be_close(netdev);
4429                 rtnl_unlock();
4430
4431                 be_clear(adapter);
4432         }
4433
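        /* pci_channel_io_perm_failure means the slot is permanently dead;
         * ask the EEH core to disconnect instead of attempting a reset.
         */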
4434         if (state == pci_channel_io_perm_failure)
4435                 return PCI_ERS_RESULT_DISCONNECT;
4436
4437         pci_disable_device(pdev);
4438
4439         /* The error could cause the FW to trigger a flash debug dump.
4440          * Resetting the card while the flash dump is in progress
4441          * can cause it not to recover; wait for it to finish.
4442          * Wait only for the first function, as the wait is needed
4443          * only once per adapter.
4444          */
4445         if (pdev->devfn == 0)
4446                 ssleep(30);
4447
4448         return PCI_ERS_RESULT_NEED_RESET;
4449 }
4450
4451 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4452 {
4453         struct be_adapter *adapter = pci_get_drvdata(pdev);
4454         int status;
4455
4456         dev_info(&adapter->pdev->dev, "EEH reset\n");
4457
4458         status = pci_enable_device(pdev);
4459         if (status)
4460                 return PCI_ERS_RESULT_DISCONNECT;
4461
4462         pci_set_master(pdev);
4463         pci_set_power_state(pdev, PCI_D0);
4464         pci_restore_state(pdev);
4465
4466         /* Check if card is ok and fw is ready */
4467         dev_info(&adapter->pdev->dev,
4468                  "Waiting for FW to be ready after EEH reset\n");
4469         status = be_fw_wait_ready(adapter);
4470         if (status)
4471                 return PCI_ERS_RESULT_DISCONNECT;
4472
4473         pci_cleanup_aer_uncorrect_error_status(pdev);
4474         be_clear_all_error(adapter);
4475         return PCI_ERS_RESULT_RECOVERED;
4476 }
4477
4478 static void be_eeh_resume(struct pci_dev *pdev)
4479 {
4480         int status = 0;
4481         struct be_adapter *adapter = pci_get_drvdata(pdev);
4482         struct net_device *netdev = adapter->netdev;
4483
4484         dev_info(&adapter->pdev->dev, "EEH resume\n");
4485
4486         pci_save_state(pdev);
4487
4488         status = be_cmd_reset_function(adapter);
4489         if (status)
4490                 goto err;
4491
4492         /* tell fw we're ready to fire cmds */
4493         status = be_cmd_fw_init(adapter);
4494         if (status)
4495                 goto err;
4496
4497         status = be_setup(adapter);
4498         if (status)
4499                 goto err;
4500
4501         if (netif_running(netdev)) {
4502                 status = be_open(netdev);
4503                 if (status)
4504                         goto err;
4505         }
4506
4507         schedule_delayed_work(&adapter->func_recovery_work,
4508                               msecs_to_jiffies(1000));
4509         netif_device_attach(netdev);
4510         return;
4511 err:
4512         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4513 }
4514
4515 static const struct pci_error_handlers be_eeh_handlers = {
4516         .error_detected = be_eeh_err_detected,
4517         .slot_reset = be_eeh_reset,
4518         .resume = be_eeh_resume,
4519 };
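/* Recovery flow driven by the PCI/EEH core: .error_detected is called
 * first, .slot_reset after the link has been reset, and .resume once
 * normal I/O can restart.
 */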
4520
4521 static struct pci_driver be_driver = {
4522         .name = DRV_NAME,
4523         .id_table = be_dev_ids,
4524         .probe = be_probe,
4525         .remove = be_remove,
4526         .suspend = be_suspend,
4527         .resume = be_resume,
4528         .shutdown = be_shutdown,
4529         .err_handler = &be_eeh_handlers
4530 };
4531
4532 static int __init be_init_module(void)
4533 {
4534         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4535             rx_frag_size != 2048) {
4536                 pr_warn(DRV_NAME
4537                         ": Module param rx_frag_size must be 2048/4096/8192;"
4538                         " using 2048\n");
4539                 rx_frag_size = 2048;
4540         }
4541
4542         return pci_register_driver(&be_driver);
4543 }
4544 module_init(be_init_module);
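/* Example usage (hypothetical values), assuming the module builds as
 * be2net:
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */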
4545
4546 static void __exit be_exit_module(void)
4547 {
4548         pci_unregister_driver(&be_driver);
4549 }
4550 module_exit(be_exit_module);