/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

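/* Sizing example: be_queue_alloc() for, say, a 256-entry queue of 4-byte
 * entries grabs a single 1KB DMA-coherent buffer (len * entry_size),
 * pre-zeroed via __GFP_ZERO; be_queue_free() releases it and NULLs
 * mem->va, so a repeated free is a harmless no-op.
 */
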
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

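/* Doorbell composition example: arming EQ id 5 after reaping 3 events
 * builds
 *      val = 5 | (1 << DB_EQ_REARM_SHIFT) | (1 << DB_EQ_EVNT_SHIFT) |
 *            (3 << DB_EQ_NUM_POPPED_SHIFT)
 * i.e. the ring id sits in the low bits with the rearm/event flags and
 * popped count packed above it, issued as a single 32-bit write. The CQ
 * doorbell is built the same way, minus the event bit.
 */
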
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that was used
         * while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

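/* Wrap-around example: with *acc == 0x0001fff0 (one prior wrap, low word
 * 0xfff0), a fresh HW reading of 0x0010 is smaller than the stored low
 * word, so the 16-bit counter must have wrapped:
 * newacc = 0x00010010 + 65536 = 0x00020010. The single ACCESS_ONCE()
 * store keeps lockless readers from seeing a torn update.
 */
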
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

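/* Example: an skb with linear data plus 3 page frags needs
 * 1 (hdr wrb) + 1 (linear) + 3 (frags) = 5 WRBs; since 5 is odd and the
 * chip is not Lancer, a dummy WRB is appended and 6 ring entries are
 * consumed. Lancer has no even-count requirement.
 */
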
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

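/* Example: a tag of 0xa005 carries priority 5 in its top 3 bits; if bit 5
 * is clear in vlan_prio_bmap, the priority bits are overwritten with
 * adapter->recommended_prio while the 12-bit VID (0x005) is preserved.
 */
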
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

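/* Note the unwind above: on a DMA mapping failure, txq->head is rewound
 * to map_head and every WRB filled so far is walked again to unmap its
 * buffer (only the first one was dma_map_single()'d; the rest are pages).
 * Returning 0 lets be_xmit() free the skb without leaking mappings.
 */
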
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        /* Insert the inner VLAN tag only once, after any pvid override */
        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;

                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

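/* A hdrlen of 0xff in the first extension header identifies the pkts
 * that can trigger the BE3 TX stall worked around in be_xmit(): such
 * pkts either get their VLAN tag inserted in SW or are dropped.
 */
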
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;
        bool skip_hw_vlan = false;
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         */
        if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

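/* Example: assuming the usual 9018-byte BE_MAX_JUMBO_FRAME_SIZE from
 * be.h, the largest MTU accepted here is 9018 - (14 + 4) = 9000 bytes
 * once the L2 header and FCS are subtracted.
 */
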
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

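/* The rate / 10 scaling suggests FW programs the limit in 10-Mbps units:
 * "ip link set <dev> vf 0 rate 1000" would be passed down as 100 units,
 * and the full 10000-Mbps range fits in 1000 units.
 */
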
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

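/* Adaptive coalescing example: at a measured 440000 pkts/s the new delay
 * is (440000 / 110000) << 3 = 32, clamped to [min_eqd, max_eqd]; results
 * under 10 are forced to 0, i.e. no interrupt moderation at low rates.
 */
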
static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

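/* RX buffers are carved out of a larger (big_page_size) page shared by
 * several ring entries; only the entry flagged last_page_user unmaps the
 * whole page, so earlier fragments stay DMA-mapped until their
 * page-mates are reaped.
 */
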
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

1446 /* Process the RX completion indicated by rxcp when GRO is disabled */
1447 static void be_rx_compl_process(struct be_rx_obj *rxo,
1448                                 struct be_rx_compl_info *rxcp)
1449 {
1450         struct be_adapter *adapter = rxo->adapter;
1451         struct net_device *netdev = adapter->netdev;
1452         struct sk_buff *skb;
1453
1454         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1455         if (unlikely(!skb)) {
1456                 rx_stats(rxo)->rx_drops_no_skbs++;
1457                 be_rx_compl_discard(rxo, rxcp);
1458                 return;
1459         }
1460
1461         skb_fill_rx_data(rxo, skb, rxcp);
1462
1463         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1464                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1465         else
1466                 skb_checksum_none_assert(skb);
1467
1468         skb->protocol = eth_type_trans(skb, netdev);
1469         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1470         if (netdev->features & NETIF_F_RXHASH)
1471                 skb->rxhash = rxcp->rss_hash;
1472
1473
1474         if (rxcp->vlanf)
1475                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1476
1477         netif_receive_skb(skb);
1478 }
1479
1480 /* Process the RX completion indicated by rxcp when GRO is enabled */
1481 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1482                              struct be_rx_compl_info *rxcp)
1483 {
1484         struct be_adapter *adapter = rxo->adapter;
1485         struct be_rx_page_info *page_info;
1486         struct sk_buff *skb = NULL;
1487         struct be_queue_info *rxq = &rxo->q;
1488         u16 remaining, curr_frag_len;
1489         u16 i, j;
1490
1491         skb = napi_get_frags(napi);
1492         if (!skb) {
1493                 be_rx_compl_discard(rxo, rxcp);
1494                 return;
1495         }
1496
1497         remaining = rxcp->pkt_size;
1498         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1499                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1500
1501                 curr_frag_len = min(remaining, rx_frag_size);
1502
1503                 /* Coalesce all frags from the same physical page in one slot */
1504                 if (i == 0 || page_info->page_offset == 0) {
1505                         /* First frag or Fresh page */
1506                         j++;
1507                         skb_frag_set_page(skb, j, page_info->page);
1508                         skb_shinfo(skb)->frags[j].page_offset =
1509                                                         page_info->page_offset;
1510                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1511                 } else {
1512                         put_page(page_info->page);
1513                 }
1514                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1515                 skb->truesize += rx_frag_size;
1516                 remaining -= curr_frag_len;
1517                 index_inc(&rxcp->rxq_idx, rxq->len);
1518                 memset(page_info, 0, sizeof(*page_info));
1519         }
1520         BUG_ON(j > MAX_SKB_FRAGS);
1521
1522         skb_shinfo(skb)->nr_frags = j + 1;
1523         skb->len = rxcp->pkt_size;
1524         skb->data_len = rxcp->pkt_size;
1525         skb->ip_summed = CHECKSUM_UNNECESSARY;
1526         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1527         if (adapter->netdev->features & NETIF_F_RXHASH)
1528                 skb->rxhash = rxcp->rss_hash;
1529
1530         if (rxcp->vlanf)
1531                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1532
1533         napi_gro_frags(napi);
1534 }
1535
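/* Parse a v1 (BE3 native mode) RX completion into the driver's
 * be_rx_compl_info; be_parse_rx_compl_v0 below handles the legacy
 * v0 layout.
 */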
1536 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1537                                  struct be_rx_compl_info *rxcp)
1538 {
1539         rxcp->pkt_size =
1540                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1541         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1542         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1543         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1544         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1545         rxcp->ip_csum =
1546                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1547         rxcp->l4_csum =
1548                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1549         rxcp->ipv6 =
1550                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1551         rxcp->rxq_idx =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1553         rxcp->num_rcvd =
1554                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1555         rxcp->pkt_type =
1556                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1557         rxcp->rss_hash =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1559         if (rxcp->vlanf) {
1560                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1561                                           compl);
1562                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1563                                                compl);
1564         }
1565         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1566 }
1567
1568 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1569                                  struct be_rx_compl_info *rxcp)
1570 {
1571         rxcp->pkt_size =
1572                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1573         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1574         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1575         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1576         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1577         rxcp->ip_csum =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1579         rxcp->l4_csum =
1580                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1581         rxcp->ipv6 =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1583         rxcp->rxq_idx =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1585         rxcp->num_rcvd =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1587         rxcp->pkt_type =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1589         rxcp->rss_hash =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1591         if (rxcp->vlanf) {
1592                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1593                                           compl);
1594                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1595                                                compl);
1596         }
1597         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1598 }
1599
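/* Fetch the next RX completion from the CQ, or NULL if no valid entry
 * is present. The entry is parsed (v0 or v1, depending on be3_native),
 * its vlan fields are sanitized and the CQ tail is advanced.
 */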
1600 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1601 {
1602         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1603         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1604         struct be_adapter *adapter = rxo->adapter;
1605
1606         /* For checking the valid bit it is OK to use either definition as the
1607          * valid bit is at the same position in both v0 and v1 Rx compl */
1608         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1609                 return NULL;
1610
1611         rmb();
1612         be_dws_le_to_cpu(compl, sizeof(*compl));
1613
1614         if (adapter->be3_native)
1615                 be_parse_rx_compl_v1(compl, rxcp);
1616         else
1617                 be_parse_rx_compl_v0(compl, rxcp);
1618
1619         if (rxcp->vlanf) {
1620                 /* vlanf could be wrongly set in some cards.
1621                  * Ignore if vtm is not set */
1622                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1623                         rxcp->vlanf = 0;
1624
1625                 if (!lancer_chip(adapter))
1626                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1627
1628                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1629                     !adapter->vlan_tag[rxcp->vlan_tag])
1630                         rxcp->vlanf = 0;
1631         }
1632
1633         /* As the compl has been parsed, reset it; we won't touch it again */
1634         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1635
1636         queue_tail_inc(&rxo->cq);
1637         return rxcp;
1638 }
1639
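/* Allocate pages for rx buffers; a compound page is requested for
 * multi-page allocations so get_page/put_page cover the whole chunk.
 */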
1640 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1641 {
1642         u32 order = get_order(size);
1643
1644         if (order > 0)
1645                 gfp |= __GFP_COMP;
1646         return  alloc_pages(gfp, order);
1647 }
1648
1649 /*
1650  * Allocate a page, split it into fragments of size rx_frag_size and post as
1651  * receive buffers to BE
1652  */
1653 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1654 {
1655         struct be_adapter *adapter = rxo->adapter;
1656         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1657         struct be_queue_info *rxq = &rxo->q;
1658         struct page *pagep = NULL;
1659         struct be_eth_rx_d *rxd;
1660         u64 page_dmaaddr = 0, frag_dmaaddr;
1661         u32 posted, page_offset = 0;
1662
1663         page_info = &rxo->page_info_tbl[rxq->head];
1664         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1665                 if (!pagep) {
1666                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1667                         if (unlikely(!pagep)) {
1668                                 rx_stats(rxo)->rx_post_fail++;
1669                                 break;
1670                         }
1671                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1672                                                     0, adapter->big_page_size,
1673                                                     DMA_FROM_DEVICE);
1674                         page_info->page_offset = 0;
1675                 } else {
1676                         get_page(pagep);
1677                         page_info->page_offset = page_offset + rx_frag_size;
1678                 }
1679                 page_offset = page_info->page_offset;
1680                 page_info->page = pagep;
1681                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1682                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1683
1684                 rxd = queue_head_node(rxq);
1685                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1686                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1687
1688                 /* Any space left in the current big page for another frag? */
1689                 if ((page_offset + rx_frag_size + rx_frag_size) >
1690                                         adapter->big_page_size) {
1691                         pagep = NULL;
1692                         page_info->last_page_user = true;
1693                 }
1694
1695                 prev_page_info = page_info;
1696                 queue_head_inc(rxq);
1697                 page_info = &rxo->page_info_tbl[rxq->head];
1698         }
1699         if (pagep)
1700                 prev_page_info->last_page_user = true;
1701
1702         if (posted) {
1703                 atomic_add(posted, &rxq->used);
1704                 be_rxq_notify(adapter, rxq->id, posted);
1705         } else if (atomic_read(&rxq->used) == 0) {
1706                 /* Let be_worker replenish when memory is available */
1707                 rxo->rx_post_starved = true;
1708         }
1709 }
1710
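/* Fetch the next valid TX completion from the CQ, or NULL if none.
 * The rmb() ensures the entry is not read before its valid bit.
 */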
1711 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1712 {
1713         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1714
1715         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1716                 return NULL;
1717
1718         rmb();
1719         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1720
1721         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1722
1723         queue_tail_inc(tx_cq);
1724         return txcp;
1725 }
1726
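/* Unmap the wrbs of the TX request ending at last_index, free its skb
 * and return the number of wrbs (including the header wrb) reclaimed.
 */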
1727 static u16 be_tx_compl_process(struct be_adapter *adapter,
1728                 struct be_tx_obj *txo, u16 last_index)
1729 {
1730         struct be_queue_info *txq = &txo->q;
1731         struct be_eth_wrb *wrb;
1732         struct sk_buff **sent_skbs = txo->sent_skb_list;
1733         struct sk_buff *sent_skb;
1734         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1735         bool unmap_skb_hdr = true;
1736
1737         sent_skb = sent_skbs[txq->tail];
1738         BUG_ON(!sent_skb);
1739         sent_skbs[txq->tail] = NULL;
1740
1741         /* skip header wrb */
1742         queue_tail_inc(txq);
1743
1744         do {
1745                 cur_index = txq->tail;
1746                 wrb = queue_tail_node(txq);
1747                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1748                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1749                 unmap_skb_hdr = false;
1750
1751                 num_wrbs++;
1752                 queue_tail_inc(txq);
1753         } while (cur_index != last_index);
1754
1755         kfree_skb(sent_skb);
1756         return num_wrbs;
1757 }
1758
1759 /* Return the number of events in the event queue */
1760 static inline int events_get(struct be_eq_obj *eqo)
1761 {
1762         struct be_eq_entry *eqe;
1763         int num = 0;
1764
1765         do {
1766                 eqe = queue_tail_node(&eqo->q);
1767                 if (eqe->evt == 0)
1768                         break;
1769
1770                 rmb();
1771                 eqe->evt = 0;
1772                 num++;
1773                 queue_tail_inc(&eqo->q);
1774         } while (true);
1775
1776         return num;
1777 }
1778
1779 /* Leaves the EQ in a disarmed state */
1780 static void be_eq_clean(struct be_eq_obj *eqo)
1781 {
1782         int num = events_get(eqo);
1783
1784         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1785 }
1786
1787 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1788 {
1789         struct be_rx_page_info *page_info;
1790         struct be_queue_info *rxq = &rxo->q;
1791         struct be_queue_info *rx_cq = &rxo->cq;
1792         struct be_rx_compl_info *rxcp;
1793         struct be_adapter *adapter = rxo->adapter;
1794         int flush_wait = 0;
1795         u16 tail;
1796
1797         /* Consume pending rx completions.
1798          * Wait for the flush completion (identified by zero num_rcvd)
1799          * to arrive. Notify CQ even when there are no more CQ entries
1800          * for HW to flush partially coalesced CQ entries.
1801          * In Lancer, there is no need to wait for flush compl.
1802          */
1803         for (;;) {
1804                 rxcp = be_rx_compl_get(rxo);
1805                 if (rxcp == NULL) {
1806                         if (lancer_chip(adapter))
1807                                 break;
1808
1809                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1810                                 dev_warn(&adapter->pdev->dev,
1811                                          "did not receive flush compl\n");
1812                                 break;
1813                         }
1814                         be_cq_notify(adapter, rx_cq->id, true, 0);
1815                         mdelay(1);
1816                 } else {
1817                         be_rx_compl_discard(rxo, rxcp);
1818                         be_cq_notify(adapter, rx_cq->id, true, 1);
1819                         if (rxcp->num_rcvd == 0)
1820                                 break;
1821                 }
1822         }
1823
1824         /* After cleanup, leave the CQ in unarmed state */
1825         be_cq_notify(adapter, rx_cq->id, false, 0);
1826
1827         /* Then free posted rx buffers that were not used */
1828         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1829         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1830                 page_info = get_rx_page_info(rxo, tail);
1831                 put_page(page_info->page);
1832                 memset(page_info, 0, sizeof(*page_info));
1833         }
1834         BUG_ON(atomic_read(&rxq->used));
1835         rxq->tail = rxq->head = 0;
1836 }
1837
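/* Drain TX completions at teardown; if some never arrive within the
 * 200ms timeout, forcibly free the corresponding posted skbs.
 */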
1838 static void be_tx_compl_clean(struct be_adapter *adapter)
1839 {
1840         struct be_tx_obj *txo;
1841         struct be_queue_info *txq;
1842         struct be_eth_tx_compl *txcp;
1843         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1844         struct sk_buff *sent_skb;
1845         bool dummy_wrb;
1846         int i, pending_txqs;
1847
1848         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1849         do {
1850                 pending_txqs = adapter->num_tx_qs;
1851
1852                 for_all_tx_queues(adapter, txo, i) {
1853                         txq = &txo->q;
1854                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1855                                 end_idx =
1856                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1857                                                       wrb_index, txcp);
1858                                 num_wrbs += be_tx_compl_process(adapter, txo,
1859                                                                 end_idx);
1860                                 cmpl++;
1861                         }
1862                         if (cmpl) {
1863                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1864                                 atomic_sub(num_wrbs, &txq->used);
1865                                 cmpl = 0;
1866                                 num_wrbs = 0;
1867                         }
1868                         if (atomic_read(&txq->used) == 0)
1869                                 pending_txqs--;
1870                 }
1871
1872                 if (pending_txqs == 0 || ++timeo > 200)
1873                         break;
1874
1875                 mdelay(1);
1876         } while (true);
1877
1878         for_all_tx_queues(adapter, txo, i) {
1879                 txq = &txo->q;
1880                 if (atomic_read(&txq->used))
1881                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1882                                 atomic_read(&txq->used));
1883
1884                 /* free posted tx for which compls will never arrive */
1885                 while (atomic_read(&txq->used)) {
1886                         sent_skb = txo->sent_skb_list[txq->tail];
1887                         end_idx = txq->tail;
1888                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1889                                                    &dummy_wrb);
1890                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1891                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1892                         atomic_sub(num_wrbs, &txq->used);
1893                 }
1894         }
1895 }
1896
1897 static void be_evt_queues_destroy(struct be_adapter *adapter)
1898 {
1899         struct be_eq_obj *eqo;
1900         int i;
1901
1902         for_all_evt_queues(adapter, eqo, i) {
1903                 if (eqo->q.created) {
1904                         be_eq_clean(eqo);
1905                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1906                 }
1907                 be_queue_free(adapter, &eqo->q);
1908         }
1909 }
1910
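/* Create one EQ per interrupt vector, with adaptive interrupt
 * coalescing (aic) enabled.
 */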
1911 static int be_evt_queues_create(struct be_adapter *adapter)
1912 {
1913         struct be_queue_info *eq;
1914         struct be_eq_obj *eqo;
1915         int i, rc;
1916
1917         adapter->num_evt_qs = num_irqs(adapter);
1918
1919         for_all_evt_queues(adapter, eqo, i) {
1920                 eqo->adapter = adapter;
1921                 eqo->tx_budget = BE_TX_BUDGET;
1922                 eqo->idx = i;
1923                 eqo->max_eqd = BE_MAX_EQD;
1924                 eqo->enable_aic = true;
1925
1926                 eq = &eqo->q;
1927                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1928                                         sizeof(struct be_eq_entry));
1929                 if (rc)
1930                         return rc;
1931
1932                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1933                 if (rc)
1934                         return rc;
1935         }
1936         return 0;
1937 }
1938
1939 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1940 {
1941         struct be_queue_info *q;
1942
1943         q = &adapter->mcc_obj.q;
1944         if (q->created)
1945                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1946         be_queue_free(adapter, q);
1947
1948         q = &adapter->mcc_obj.cq;
1949         if (q->created)
1950                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1951         be_queue_free(adapter, q);
1952 }
1953
1954 /* Must be called only after TX qs are created as MCC shares TX EQ */
1955 static int be_mcc_queues_create(struct be_adapter *adapter)
1956 {
1957         struct be_queue_info *q, *cq;
1958
1959         cq = &adapter->mcc_obj.cq;
1960         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1961                         sizeof(struct be_mcc_compl)))
1962                 goto err;
1963
1964         /* Use the default EQ for MCC completions */
1965         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1966                 goto mcc_cq_free;
1967
1968         q = &adapter->mcc_obj.q;
1969         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1970                 goto mcc_cq_destroy;
1971
1972         if (be_cmd_mccq_create(adapter, q, cq))
1973                 goto mcc_q_free;
1974
1975         return 0;
1976
1977 mcc_q_free:
1978         be_queue_free(adapter, q);
1979 mcc_cq_destroy:
1980         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1981 mcc_cq_free:
1982         be_queue_free(adapter, cq);
1983 err:
1984         return -1;
1985 }
1986
1987 static void be_tx_queues_destroy(struct be_adapter *adapter)
1988 {
1989         struct be_queue_info *q;
1990         struct be_tx_obj *txo;
1991         u8 i;
1992
1993         for_all_tx_queues(adapter, txo, i) {
1994                 q = &txo->q;
1995                 if (q->created)
1996                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1997                 be_queue_free(adapter, q);
1998
1999                 q = &txo->cq;
2000                 if (q->created)
2001                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2002                 be_queue_free(adapter, q);
2003         }
2004 }
2005
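/* A single TXQ is used on BE2, in multi-channel mode, and on non-Lancer
 * functions that are VFs or want SR-IOV; otherwise all supported TX
 * queues are used.
 */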
2006 static int be_num_txqs_want(struct be_adapter *adapter)
2007 {
2008         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2009             be_is_mc(adapter) ||
2010             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2011             BE2_chip(adapter))
2012                 return 1;
2013         else
2014                 return adapter->max_tx_queues;
2015 }
2016
2017 static int be_tx_cqs_create(struct be_adapter *adapter)
2018 {
2019         struct be_queue_info *cq, *eq;
2020         int status;
2021         struct be_tx_obj *txo;
2022         u8 i;
2023
2024         adapter->num_tx_qs = be_num_txqs_want(adapter);
2025         if (adapter->num_tx_qs != MAX_TX_QS) {
2026                 rtnl_lock();
2027                 netif_set_real_num_tx_queues(adapter->netdev,
2028                         adapter->num_tx_qs);
2029                 rtnl_unlock();
2030         }
2031
2032         for_all_tx_queues(adapter, txo, i) {
2033                 cq = &txo->cq;
2034                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2035                                         sizeof(struct be_eth_tx_compl));
2036                 if (status)
2037                         return status;
2038
2039                 /* If num_evt_qs is less than num_tx_qs, then more than
2040          * one txq shares an eq
2041                  */
2042                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2043                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2044                 if (status)
2045                         return status;
2046         }
2047         return 0;
2048 }
2049
2050 static int be_tx_qs_create(struct be_adapter *adapter)
2051 {
2052         struct be_tx_obj *txo;
2053         int i, status;
2054
2055         for_all_tx_queues(adapter, txo, i) {
2056                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2057                                         sizeof(struct be_eth_wrb));
2058                 if (status)
2059                         return status;
2060
2061                 status = be_cmd_txq_create(adapter, txo);
2062                 if (status)
2063                         return status;
2064         }
2065
2066         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2067                  adapter->num_tx_qs);
2068         return 0;
2069 }
2070
2071 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2072 {
2073         struct be_queue_info *q;
2074         struct be_rx_obj *rxo;
2075         int i;
2076
2077         for_all_rx_queues(adapter, rxo, i) {
2078                 q = &rxo->cq;
2079                 if (q->created)
2080                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2081                 be_queue_free(adapter, q);
2082         }
2083 }
2084
2085 static int be_rx_cqs_create(struct be_adapter *adapter)
2086 {
2087         struct be_queue_info *eq, *cq;
2088         struct be_rx_obj *rxo;
2089         int rc, i;
2090
2091         /* We'll create as many RSS rings as there are irqs.
2092          * But when there's only one irq there's no use creating RSS rings
2093          */
2094         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2095                                 num_irqs(adapter) + 1 : 1;
2096         if (adapter->num_rx_qs != MAX_RX_QS) {
2097                 rtnl_lock();
2098                 netif_set_real_num_rx_queues(adapter->netdev,
2099                                              adapter->num_rx_qs);
2100                 rtnl_unlock();
2101         }
2102
2103         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2104         for_all_rx_queues(adapter, rxo, i) {
2105                 rxo->adapter = adapter;
2106                 cq = &rxo->cq;
2107                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2108                                 sizeof(struct be_eth_rx_compl));
2109                 if (rc)
2110                         return rc;
2111
2112                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2113                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2114                 if (rc)
2115                         return rc;
2116         }
2117
2118         dev_info(&adapter->pdev->dev,
2119                  "created %d RSS queue(s) and 1 default RX queue\n",
2120                  adapter->num_rx_qs - 1);
2121         return 0;
2122 }
2123
2124 static irqreturn_t be_intx(int irq, void *dev)
2125 {
2126         struct be_eq_obj *eqo = dev;
2127         struct be_adapter *adapter = eqo->adapter;
2128         int num_evts = 0;
2129
2130         /* IRQ is not expected when NAPI is scheduled as the EQ
2131          * will not be armed.
2132          * But, this can happen on Lancer INTx where it takes
2133          * a while to de-assert INTx or in BE2 where occasionally
2134          * an interrupt may be raised even when EQ is unarmed.
2135          * If NAPI is already scheduled, then counting & notifying
2136          * events will orphan them.
2137          */
2138         if (napi_schedule_prep(&eqo->napi)) {
2139                 num_evts = events_get(eqo);
2140                 __napi_schedule(&eqo->napi);
2141                 if (num_evts)
2142                         eqo->spurious_intr = 0;
2143         }
2144         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2145
2146         /* Return IRQ_HANDLED only for the first spurious intr
2147          * after a valid intr to stop the kernel from branding
2148          * this irq as a bad one!
2149          */
2150         if (num_evts || eqo->spurious_intr++ == 0)
2151                 return IRQ_HANDLED;
2152         else
2153                 return IRQ_NONE;
2154 }
2155
2156 static irqreturn_t be_msix(int irq, void *dev)
2157 {
2158         struct be_eq_obj *eqo = dev;
2159
2160         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2161         napi_schedule(&eqo->napi);
2162         return IRQ_HANDLED;
2163 }
2164
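/* GRO is attempted only for error-free TCP frames */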
2165 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2166 {
2167         return rxcp->tcpf && !rxcp->err;
2168 }
2169
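/* NAPI RX handler: consume up to budget completions, skipping flush
 * compls and discarding partial-DMA or wrongly-routed ones; rx buffers
 * are replenished when the queue falls below the refill watermark.
 */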
2170 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2171                         int budget)
2172 {
2173         struct be_adapter *adapter = rxo->adapter;
2174         struct be_queue_info *rx_cq = &rxo->cq;
2175         struct be_rx_compl_info *rxcp;
2176         u32 work_done;
2177
2178         for (work_done = 0; work_done < budget; work_done++) {
2179                 rxcp = be_rx_compl_get(rxo);
2180                 if (!rxcp)
2181                         break;
2182
2183                 /* Is it a flush compl that has no data? */
2184                 if (unlikely(rxcp->num_rcvd == 0))
2185                         goto loop_continue;
2186
2187                 /* Discard compl with partial DMA Lancer B0 */
2188                 if (unlikely(!rxcp->pkt_size)) {
2189                         be_rx_compl_discard(rxo, rxcp);
2190                         goto loop_continue;
2191                 }
2192
2193                 /* On BE drop pkts that arrive due to imperfect filtering in
2194          * promiscuous mode on some SKUs
2195                  */
2196                 if (unlikely(rxcp->port != adapter->port_num &&
2197                                 !lancer_chip(adapter))) {
2198                         be_rx_compl_discard(rxo, rxcp);
2199                         goto loop_continue;
2200                 }
2201
2202                 if (do_gro(rxcp))
2203                         be_rx_compl_process_gro(rxo, napi, rxcp);
2204                 else
2205                         be_rx_compl_process(rxo, rxcp);
2206 loop_continue:
2207                 be_rx_stats_update(rxo, rxcp);
2208         }
2209
2210         if (work_done) {
2211                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2212
2213                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2214                         be_post_rx_frags(rxo, GFP_ATOMIC);
2215         }
2216
2217         return work_done;
2218 }
2219
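/* Reap up to budget TX completions, free the consumed wrbs and re-wake
 * the netdev subqueue if it was flow-stopped. Returns true when all
 * pending completions were processed within budget.
 */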
2220 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2221                           int budget, int idx)
2222 {
2223         struct be_eth_tx_compl *txcp;
2224         int num_wrbs = 0, work_done;
2225
2226         for (work_done = 0; work_done < budget; work_done++) {
2227                 txcp = be_tx_compl_get(&txo->cq);
2228                 if (!txcp)
2229                         break;
2230                 num_wrbs += be_tx_compl_process(adapter, txo,
2231                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2232                                         wrb_index, txcp));
2233         }
2234
2235         if (work_done) {
2236                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2237                 atomic_sub(num_wrbs, &txo->q.used);
2238
2239                 /* As Tx wrbs have been freed up, wake up netdev queue
2240                  * if it was stopped due to lack of tx wrbs.  */
2241                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2242                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2243                         netif_wake_subqueue(adapter->netdev, idx);
2244                 }
2245
2246                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2247                 tx_stats(txo)->tx_compl += work_done;
2248                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2249         }
2250         return (work_done < budget); /* Done */
2251 }
2252
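/* NAPI poll: service the TXQs and RXQs mapped to this EQ (plus MCC
 * completions on the MCC EQ) and re-arm the EQ only when the budget
 * was not exhausted.
 */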
2253 int be_poll(struct napi_struct *napi, int budget)
2254 {
2255         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2256         struct be_adapter *adapter = eqo->adapter;
2257         int max_work = 0, work, i, num_evts;
2258         bool tx_done;
2259
2260         num_evts = events_get(eqo);
2261
2262         /* Process all TXQs serviced by this EQ */
2263         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2264                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2265                                         eqo->tx_budget, i);
2266                 if (!tx_done)
2267                         max_work = budget;
2268         }
2269
2270         /* This loop will iterate twice for EQ0 in which
2271          * completions of the last RXQ (default one) are also processed.
2272          * For other EQs the loop iterates only once.
2273          */
2274         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2275                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2276                 max_work = max(work, max_work);
2277         }
2278
2279         if (is_mcc_eqo(eqo))
2280                 be_process_mcc(adapter);
2281
2282         if (max_work < budget) {
2283                 napi_complete(napi);
2284                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2285         } else {
2286                 /* As we'll continue in polling mode, count and clear events */
2287                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2288         }
2289         return max_work;
2290 }
2291
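/* Check for unrecoverable errors: SLIPORT status registers on Lancer,
 * unmasked UE status bits on BE; log whichever blocks flagged an error.
 */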
2292 void be_detect_error(struct be_adapter *adapter)
2293 {
2294         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2295         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2296         u32 i;
2297
2298         if (be_hw_error(adapter))
2299                 return;
2300
2301         if (lancer_chip(adapter)) {
2302                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2303                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2304                         sliport_err1 = ioread32(adapter->db +
2305                                         SLIPORT_ERROR1_OFFSET);
2306                         sliport_err2 = ioread32(adapter->db +
2307                                         SLIPORT_ERROR2_OFFSET);
2308                 }
2309         } else {
2310                 pci_read_config_dword(adapter->pdev,
2311                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2312                 pci_read_config_dword(adapter->pdev,
2313                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2314                 pci_read_config_dword(adapter->pdev,
2315                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2316                 pci_read_config_dword(adapter->pdev,
2317                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2318
2319                 ue_lo = (ue_lo & ~ue_lo_mask);
2320                 ue_hi = (ue_hi & ~ue_hi_mask);
2321         }
2322
2323         /* On certain platforms BE hardware can indicate spurious UEs.
2324          * Allow the h/w to stop working completely in case of a real UE.
2325          * Hence not setting the hw_error for UE detection.
2326          */
2327         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2328                 adapter->hw_error = true;
2329                 dev_err(&adapter->pdev->dev,
2330                         "Error detected in the card\n");
2331         }
2332
2333         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2334                 dev_err(&adapter->pdev->dev,
2335                         "ERR: sliport status 0x%x\n", sliport_status);
2336                 dev_err(&adapter->pdev->dev,
2337                         "ERR: sliport error1 0x%x\n", sliport_err1);
2338                 dev_err(&adapter->pdev->dev,
2339                         "ERR: sliport error2 0x%x\n", sliport_err2);
2340         }
2341
2342         if (ue_lo) {
2343                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2344                         if (ue_lo & 1)
2345                                 dev_err(&adapter->pdev->dev,
2346                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2347                 }
2348         }
2349
2350         if (ue_hi) {
2351                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2352                         if (ue_hi & 1)
2353                                 dev_err(&adapter->pdev->dev,
2354                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2355                 }
2356         }
2357
2358 }
2359
2360 static void be_msix_disable(struct be_adapter *adapter)
2361 {
2362         if (msix_enabled(adapter)) {
2363                 pci_disable_msix(adapter->pdev);
2364                 adapter->num_msix_vec = 0;
2365         }
2366 }
2367
2368 static uint be_num_rss_want(struct be_adapter *adapter)
2369 {
2370         u32 num = 0;
2371
2372         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2373             (lancer_chip(adapter) ||
2374              (!sriov_want(adapter) && be_physfn(adapter)))) {
2375                 num = adapter->max_rss_queues;
2376                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2377         }
2378         return num;
2379 }
2380
2381 static void be_msix_enable(struct be_adapter *adapter)
2382 {
2383 #define BE_MIN_MSIX_VECTORS             1
2384         int i, status, num_vec, num_roce_vec = 0;
2385         struct device *dev = &adapter->pdev->dev;
2386
2387         /* If RSS queues are not used, need a vec for default RX Q */
2388         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2389         if (be_roce_supported(adapter)) {
2390                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2391                                         (num_online_cpus() + 1));
2392                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2393                 num_vec += num_roce_vec;
2394                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2395         }
2396         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2397
2398         for (i = 0; i < num_vec; i++)
2399                 adapter->msix_entries[i].entry = i;
2400
2401         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2402         if (status == 0) {
2403                 goto done;
2404         } else if (status >= BE_MIN_MSIX_VECTORS) {
2405                 num_vec = status;
2406                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2407                                 num_vec) == 0)
2408                         goto done;
2409         }
2410
2411         dev_warn(dev, "MSIx enable failed\n");
2412         return;
2413 done:
2414         if (be_roce_supported(adapter)) {
2415                 if (num_vec > num_roce_vec) {
2416                         adapter->num_msix_vec = num_vec - num_roce_vec;
2417                         adapter->num_msix_roce_vec =
2418                                 num_vec - adapter->num_msix_vec;
2419                 } else {
2420                         adapter->num_msix_vec = num_vec;
2421                         adapter->num_msix_roce_vec = 0;
2422                 }
2423         } else
2424                 adapter->num_msix_vec = num_vec;
2425         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2426         return;
2427 }
2428
2429 static inline int be_msix_vec_get(struct be_adapter *adapter,
2430                                 struct be_eq_obj *eqo)
2431 {
2432         return adapter->msix_entries[eqo->idx].vector;
2433 }
2434
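/* request_irq() one vector per EQ; on failure free the vectors already
 * acquired and disable MSI-X, so be_irq_register() can fall back to
 * INTx on PFs.
 */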
2435 static int be_msix_register(struct be_adapter *adapter)
2436 {
2437         struct net_device *netdev = adapter->netdev;
2438         struct be_eq_obj *eqo;
2439         int status, i, vec;
2440
2441         for_all_evt_queues(adapter, eqo, i) {
2442                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2443                 vec = be_msix_vec_get(adapter, eqo);
2444                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2445                 if (status)
2446                         goto err_msix;
2447         }
2448
2449         return 0;
2450 err_msix:
2451         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2452                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2453         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2454                 status);
2455         be_msix_disable(adapter);
2456         return status;
2457 }
2458
2459 static int be_irq_register(struct be_adapter *adapter)
2460 {
2461         struct net_device *netdev = adapter->netdev;
2462         int status;
2463
2464         if (msix_enabled(adapter)) {
2465                 status = be_msix_register(adapter);
2466                 if (status == 0)
2467                         goto done;
2468                 /* INTx is not supported for VF */
2469                 if (!be_physfn(adapter))
2470                         return status;
2471         }
2472
2473         /* INTx: only the first EQ is used */
2474         netdev->irq = adapter->pdev->irq;
2475         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2476                              &adapter->eq_obj[0]);
2477         if (status) {
2478                 dev_err(&adapter->pdev->dev,
2479                         "INTx request IRQ failed - err %d\n", status);
2480                 return status;
2481         }
2482 done:
2483         adapter->isr_registered = true;
2484         return 0;
2485 }
2486
2487 static void be_irq_unregister(struct be_adapter *adapter)
2488 {
2489         struct net_device *netdev = adapter->netdev;
2490         struct be_eq_obj *eqo;
2491         int i;
2492
2493         if (!adapter->isr_registered)
2494                 return;
2495
2496         /* INTx */
2497         if (!msix_enabled(adapter)) {
2498                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2499                 goto done;
2500         }
2501
2502         /* MSIx */
2503         for_all_evt_queues(adapter, eqo, i)
2504                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2505
2506 done:
2507         adapter->isr_registered = false;
2508 }
2509
2510 static void be_rx_qs_destroy(struct be_adapter *adapter)
2511 {
2512         struct be_queue_info *q;
2513         struct be_rx_obj *rxo;
2514         int i;
2515
2516         for_all_rx_queues(adapter, rxo, i) {
2517                 q = &rxo->q;
2518                 if (q->created) {
2519                         be_cmd_rxq_destroy(adapter, q);
2520                         /* After the rxq is invalidated, wait for a grace time
2521                          * of 1ms for all dma to end and the flush compl to
2522                          * arrive
2523                          */
2524                         mdelay(1);
2525                         be_rx_cq_clean(rxo);
2526                 }
2527                 be_queue_free(adapter, q);
2528         }
2529 }
2530
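/* ndo_stop: quiesce NAPI and the MCC, wait for pending TX completions,
 * tear down the RX queues and release the IRQs.
 */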
2531 static int be_close(struct net_device *netdev)
2532 {
2533         struct be_adapter *adapter = netdev_priv(netdev);
2534         struct be_eq_obj *eqo;
2535         int i;
2536
2537         be_roce_dev_close(adapter);
2538
2539         for_all_evt_queues(adapter, eqo, i)
2540                 napi_disable(&eqo->napi);
2541
2542         be_async_mcc_disable(adapter);
2543
2544         /* Wait for all pending tx completions to arrive so that
2545          * all tx skbs are freed.
2546          */
2547         be_tx_compl_clean(adapter);
2548
2549         be_rx_qs_destroy(adapter);
2550
2551         for_all_evt_queues(adapter, eqo, i) {
2552                 if (msix_enabled(adapter))
2553                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2554                 else
2555                         synchronize_irq(netdev->irq);
2556                 be_eq_clean(eqo);
2557         }
2558
2559         be_irq_unregister(adapter);
2560
2561         return 0;
2562 }
2563
2564 static int be_rx_qs_create(struct be_adapter *adapter)
2565 {
2566         struct be_rx_obj *rxo;
2567         int rc, i, j;
2568         u8 rsstable[128];
2569
2570         for_all_rx_queues(adapter, rxo, i) {
2571                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2572                                     sizeof(struct be_eth_rx_d));
2573                 if (rc)
2574                         return rc;
2575         }
2576
2577         /* The FW would like the default RXQ to be created first */
2578         rxo = default_rxo(adapter);
2579         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2580                                adapter->if_handle, false, &rxo->rss_id);
2581         if (rc)
2582                 return rc;
2583
2584         for_all_rss_queues(adapter, rxo, i) {
2585                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2586                                        rx_frag_size, adapter->if_handle,
2587                                        true, &rxo->rss_id);
2588                 if (rc)
2589                         return rc;
2590         }
2591
2592         if (be_multi_rxq(adapter)) {
2593                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2594                         for_all_rss_queues(adapter, rxo, i) {
2595                                 if ((j + i) >= 128)
2596                                         break;
2597                                 rsstable[j + i] = rxo->rss_id;
2598                         }
2599                 }
2600                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2601                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2602
2603                 if (!BEx_chip(adapter))
2604                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2605                                                 RSS_ENABLE_UDP_IPV6;
2606
2607                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2608                                        128);
2609                 if (rc) {
2610                         adapter->rss_flags = 0;
2611                         return rc;
2612                 }
2613         }
2614
2615         /* First time posting */
2616         for_all_rx_queues(adapter, rxo, i)
2617                 be_post_rx_frags(rxo, GFP_KERNEL);
2618         return 0;
2619 }
2620
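/* ndo_open: create and fill the RX queues, register IRQs, arm all CQs
 * and EQs, enable NAPI and report the initial link state.
 */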
2621 static int be_open(struct net_device *netdev)
2622 {
2623         struct be_adapter *adapter = netdev_priv(netdev);
2624         struct be_eq_obj *eqo;
2625         struct be_rx_obj *rxo;
2626         struct be_tx_obj *txo;
2627         u8 link_status;
2628         int status, i;
2629
2630         status = be_rx_qs_create(adapter);
2631         if (status)
2632                 goto err;
2633
2634         be_irq_register(adapter);
2635
2636         for_all_rx_queues(adapter, rxo, i)
2637                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2638
2639         for_all_tx_queues(adapter, txo, i)
2640                 be_cq_notify(adapter, txo->cq.id, true, 0);
2641
2642         be_async_mcc_enable(adapter);
2643
2644         for_all_evt_queues(adapter, eqo, i) {
2645                 napi_enable(&eqo->napi);
2646                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2647         }
2648
2649         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2650         if (!status)
2651                 be_link_status_update(adapter, link_status);
2652
2653         be_roce_dev_open(adapter);
2654         return 0;
2655 err:
2656         be_close(adapter->netdev);
2657         return -EIO;
2658 }
2659
2660 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2661 {
2662         struct be_dma_mem cmd;
2663         int status = 0;
2664         u8 mac[ETH_ALEN];
2665
2666         memset(mac, 0, ETH_ALEN);
2667
2668         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2669         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2670                                     GFP_KERNEL | __GFP_ZERO);
2671         if (cmd.va == NULL)
2672                 return -1;
2673
2674         if (enable) {
2675                 status = pci_write_config_dword(adapter->pdev,
2676                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2677                 if (status) {
2678                         dev_err(&adapter->pdev->dev,
2679                                 "Could not enable Wake-on-lan\n");
2680                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2681                                           cmd.dma);
2682                         return status;
2683                 }
2684                 status = be_cmd_enable_magic_wol(adapter,
2685                                 adapter->netdev->dev_addr, &cmd);
2686                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2687                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2688         } else {
2689                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2690                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2691                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2692         }
2693
2694         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2695         return status;
2696 }
2697
2698 /*
2699  * Generate a seed MAC address from the PF MAC address using jhash.
2700  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2701  * These addresses are programmed in the ASIC by the PF and the VF driver
2702  * queries for the MAC address during its probe.
2703  */
2704 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2705 {
2706         u32 vf;
2707         int status = 0;
2708         u8 mac[ETH_ALEN];
2709         struct be_vf_cfg *vf_cfg;
2710
2711         be_vf_eth_addr_generate(adapter, mac);
2712
2713         for_all_vfs(adapter, vf_cfg, vf) {
2714                 if (lancer_chip(adapter)) {
2715                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2716                 } else {
2717                         status = be_cmd_pmac_add(adapter, mac,
2718                                                  vf_cfg->if_handle,
2719                                                  &vf_cfg->pmac_id, vf + 1);
2720                 }
2721
2722                 if (status)
2723                         dev_err(&adapter->pdev->dev,
2724                         "Mac address assignment failed for VF %d\n", vf);
2725                 else
2726                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2727
2728                 mac[5] += 1;
2729         }
2730         return status;
2731 }
2732
2733 static int be_vfs_mac_query(struct be_adapter *adapter)
2734 {
2735         int status, vf;
2736         u8 mac[ETH_ALEN];
2737         struct be_vf_cfg *vf_cfg;
2738         bool active;
2739
2740         for_all_vfs(adapter, vf_cfg, vf) {
2741                 be_cmd_get_mac_from_list(adapter, mac, &active,
2742                                          &vf_cfg->pmac_id, 0);
2743
2744                 status = be_cmd_mac_addr_query(adapter, mac, false,
2745                                                vf_cfg->if_handle, 0);
2746                 if (status)
2747                         return status;
2748                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2749         }
2750         return 0;
2751 }
2752
2753 static void be_vf_clear(struct be_adapter *adapter)
2754 {
2755         struct be_vf_cfg *vf_cfg;
2756         u32 vf;
2757
2758         if (be_find_vfs(adapter, ASSIGNED)) {
2759                 dev_warn(&adapter->pdev->dev,
2760                          "VFs are assigned to VMs: not disabling VFs\n");
2761                 goto done;
2762         }
2763
2764         for_all_vfs(adapter, vf_cfg, vf) {
2765                 if (lancer_chip(adapter))
2766                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2767                 else
2768                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2769                                         vf_cfg->pmac_id, vf + 1);
2770
2771                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2772         }
2773         pci_disable_sriov(adapter->pdev);
2774 done:
2775         kfree(adapter->vf_cfg);
2776         adapter->num_vfs = 0;
2777 }
2778
2779 static int be_clear(struct be_adapter *adapter)
2780 {
2781         int i = 1;
2782
2783         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2784                 cancel_delayed_work_sync(&adapter->work);
2785                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2786         }
2787
2788         if (sriov_enabled(adapter))
2789                 be_vf_clear(adapter);
2790
2791         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2792                 be_cmd_pmac_del(adapter, adapter->if_handle,
2793                         adapter->pmac_id[i], 0);
2794
2795         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2796
2797         be_mcc_queues_destroy(adapter);
2798         be_rx_cqs_destroy(adapter);
2799         be_tx_queues_destroy(adapter);
2800         be_evt_queues_destroy(adapter);
2801
2802         kfree(adapter->pmac_id);
2803         adapter->pmac_id = NULL;
2804
2805         be_msix_disable(adapter);
2806         return 0;
2807 }
2808
2809 static int be_vfs_if_create(struct be_adapter *adapter)
2810 {
2811         struct be_vf_cfg *vf_cfg;
2812         u32 cap_flags, en_flags, vf;
2813         int status;
2814
2815         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2816                     BE_IF_FLAGS_MULTICAST;
2817
2818         for_all_vfs(adapter, vf_cfg, vf) {
2819                 if (!BE3_chip(adapter))
2820                         be_cmd_get_profile_config(adapter, &cap_flags,
2821                                                   NULL, vf + 1);
2822
2823                 /* If a FW profile exists, then cap_flags are updated */
2824                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2825                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2826                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2827                                           &vf_cfg->if_handle, vf + 1);
2828                 if (status)
2829                         goto err;
2830         }
2831 err:
2832         return status;
2833 }
2834
2835 static int be_vf_setup_init(struct be_adapter *adapter)
2836 {
2837         struct be_vf_cfg *vf_cfg;
2838         int vf;
2839
2840         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2841                                   GFP_KERNEL);
2842         if (!adapter->vf_cfg)
2843                 return -ENOMEM;
2844
2845         for_all_vfs(adapter, vf_cfg, vf) {
2846                 vf_cfg->if_handle = -1;
2847                 vf_cfg->pmac_id = -1;
2848         }
2849         return 0;
2850 }
2851
2852 static int be_vf_setup(struct be_adapter *adapter)
2853 {
2854         struct be_vf_cfg *vf_cfg;
2855         u16 def_vlan, lnk_speed;
2856         int status, old_vfs, vf;
2857         struct device *dev = &adapter->pdev->dev;
2858
2859         old_vfs = be_find_vfs(adapter, ENABLED);
2860         if (old_vfs) {
2861                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2862                 if (old_vfs != num_vfs)
2863                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2864                 adapter->num_vfs = old_vfs;
2865         } else {
2866                 if (num_vfs > adapter->dev_num_vfs)
2867                         dev_info(dev, "Device supports only %d VFs, not %d\n",
2868                                  adapter->dev_num_vfs, num_vfs);
2869                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2870
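                /* Enable no more VFs than both the module param and the
                 * device limit allow.
                 */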
2871                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2872                 if (status) {
2873                         dev_err(dev, "SRIOV enable failed\n");
2874                         adapter->num_vfs = 0;
2875                         return 0;
2876                 }
2877         }
2878
2879         status = be_vf_setup_init(adapter);
2880         if (status)
2881                 goto err;
2882
2883         if (old_vfs) {
2884                 for_all_vfs(adapter, vf_cfg, vf) {
2885                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2886                         if (status)
2887                                 goto err;
2888                 }
2889         } else {
2890                 status = be_vfs_if_create(adapter);
2891                 if (status)
2892                         goto err;
2893         }
2894
2895         if (old_vfs) {
2896                 status = be_vfs_mac_query(adapter);
2897                 if (status)
2898                         goto err;
2899         } else {
2900                 status = be_vf_eth_addr_config(adapter);
2901                 if (status)
2902                         goto err;
2903         }
2904
2905         for_all_vfs(adapter, vf_cfg, vf) {
2906                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2907                  * Allow full available bandwidth
2908                  */
2909                 if (BE3_chip(adapter) && !old_vfs)
2910                         be_cmd_set_qos(adapter, 1000, vf + 1);
2911
2912                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2913                                                   NULL, vf + 1);
2914                 if (!status)
2915                         vf_cfg->tx_rate = lnk_speed;
2916
2917                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2918                                                vf + 1, vf_cfg->if_handle);
2919                 if (status)
2920                         goto err;
2921                 vf_cfg->def_vid = def_vlan;
2922
2923                 be_cmd_enable_vf(adapter, vf + 1);
2924         }
2925         return 0;
2926 err:
2927         dev_err(dev, "VF setup failed\n");
2928         be_vf_clear(adapter);
2929         return status;
2930 }
2931
2932 static void be_setup_init(struct be_adapter *adapter)
2933 {
2934         adapter->vlan_prio_bmap = 0xff;
2935         adapter->phy.link_speed = -1;
2936         adapter->if_handle = -1;
2937         adapter->be3_native = false;
2938         adapter->promiscuous = false;
2939         if (be_physfn(adapter))
2940                 adapter->cmd_privileges = MAX_PRIVILEGES;
2941         else
2942                 adapter->cmd_privileges = MIN_PRIVILEGES;
2943 }
2944
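/* Choose the MAC for this function: if one is already known (perm_addr is
 * set), reuse the netdev address; otherwise query FW for it (MAC-list on
 * Lancer, permanent MAC for a BE3 PF, PF-provisioned MAC for a BE3 VF).
 * *active_mac tells the caller whether the address is already programmed
 * in FW, i.e. whether a pmac_add is still needed.
 */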
2945 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2946                            bool *active_mac, u32 *pmac_id)
2947 {
2948         int status = 0;
2949
2950         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2951                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2952                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2953                         *active_mac = true;
2954                 else
2955                         *active_mac = false;
2956
2957                 return status;
2958         }
2959
2960         if (lancer_chip(adapter)) {
2961                 status = be_cmd_get_mac_from_list(adapter, mac,
2962                                                   active_mac, pmac_id, 0);
2963                 if (*active_mac) {
2964                         status = be_cmd_mac_addr_query(adapter, mac, false,
2965                                                        if_handle, *pmac_id);
2966                 }
2967         } else if (be_physfn(adapter)) {
2968                 /* For BE3, for PF get permanent MAC */
2969                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2970                 *active_mac = false;
2971         } else {
2972                 /* For BE3, for VF get soft MAC assigned by PF*/
2973                 status = be_cmd_mac_addr_query(adapter, mac, false,
2974                                                if_handle, 0);
2975                 *active_mac = true;
2976         }
2977         return status;
2978 }
2979
2980 static void be_get_resources(struct be_adapter *adapter)
2981 {
2982         u16 dev_num_vfs;
2983         int pos, status;
2984         bool profile_present = false;
2985         u16 txq_count = 0;
2986
2987         if (!BEx_chip(adapter)) {
2988                 status = be_cmd_get_func_config(adapter);
2989                 if (!status)
2990                         profile_present = true;
2991         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2992                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2993         }
2994
2995         if (profile_present) {
2996                 /* Sanity fixes for Lancer */
2997                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2998                                               BE_UC_PMAC_COUNT);
2999                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3000                                            BE_NUM_VLANS_SUPPORTED);
3001                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3002                                                BE_MAX_MC);
3003                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3004                                                MAX_TX_QS);
3005                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3006                                                 BE3_MAX_RSS_QS);
3007                 adapter->max_event_queues = min_t(u16,
3008                                                   adapter->max_event_queues,
3009                                                   BE3_MAX_RSS_QS);
3010
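                /* One RX queue is needed for default (non-RSS) traffic,
                 * so when every RX queue would otherwise be an RSS queue,
                 * give one back.
                 */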
3011                 if (adapter->max_rss_queues &&
3012                     adapter->max_rss_queues == adapter->max_rx_queues)
3013                         adapter->max_rss_queues -= 1;
3014
3015                 if (adapter->max_event_queues < adapter->max_rss_queues)
3016                         adapter->max_rss_queues = adapter->max_event_queues;
3017
3018         } else {
3019                 if (be_physfn(adapter))
3020                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3021                 else
3022                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3023
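                /* In multi-channel (FLEX10) mode the VLAN table is
                 * presumably shared across the channels, so each function
                 * gets only a fraction of the entries.
                 */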
3024                 if (adapter->function_mode & FLEX10_MODE)
3025                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3026                 else
3027                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3028
3029                 adapter->max_mcast_mac = BE_MAX_MC;
3030                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3031                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3032                                                MAX_TX_QS);
3033                 adapter->max_rss_queues = (adapter->be3_native) ?
3034                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3035                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3036
3037                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3038                                         BE_IF_FLAGS_BROADCAST |
3039                                         BE_IF_FLAGS_MULTICAST |
3040                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3041                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3042                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3043                                         BE_IF_FLAGS_PROMISCUOUS;
3044
3045                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3046                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3047         }
3048
3049         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3050         if (pos) {
3051                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3052                                      &dev_num_vfs);
3053                 if (BE3_chip(adapter))
3054                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3055                 adapter->dev_num_vfs = dev_num_vfs;
3056         }
3057 }
3058
3059 /* Routine to query per function resource limits */
3060 static int be_get_config(struct be_adapter *adapter)
3061 {
3062         int status;
3063
3064         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3065                                      &adapter->function_mode,
3066                                      &adapter->function_caps,
3067                                      &adapter->asic_rev);
3068         if (status)
3069                 goto err;
3070
3071         be_get_resources(adapter);
3072
3073         /* primary mac needs 1 pmac entry */
3074         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3075                                    sizeof(u32), GFP_KERNEL);
3076         if (!adapter->pmac_id) {
3077                 status = -ENOMEM;
3078                 goto err;
3079         }
3080
3081 err:
3082         return status;
3083 }
3084
3085 static int be_setup(struct be_adapter *adapter)
3086 {
3087         struct device *dev = &adapter->pdev->dev;
3088         u32 en_flags;
3089         u32 tx_fc, rx_fc;
3090         int status;
3091         u8 mac[ETH_ALEN];
3092         bool active_mac;
3093
3094         be_setup_init(adapter);
3095
3096         if (!lancer_chip(adapter))
3097                 be_cmd_req_native_mode(adapter);
3098
3099         status = be_get_config(adapter);
3100         if (status)
3101                 goto err;
3102
3103         be_msix_enable(adapter);
3104
3105         status = be_evt_queues_create(adapter);
3106         if (status)
3107                 goto err;
3108
3109         status = be_tx_cqs_create(adapter);
3110         if (status)
3111                 goto err;
3112
3113         status = be_rx_cqs_create(adapter);
3114         if (status)
3115                 goto err;
3116
3117         status = be_mcc_queues_create(adapter);
3118         if (status)
3119                 goto err;
3120
3121         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3122         /* In UMC mode FW does not return right privileges.
3123          * Override with correct privilege equivalent to PF.
3124          */
3125         if (be_is_mc(adapter))
3126                 adapter->cmd_privileges = MAX_PRIVILEGES;
3127
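        /* Create the interface with every capability the function
         * reports, but enable only the basic flags the driver uses.
         */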
3128         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3129                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3130
3131         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3132                 en_flags |= BE_IF_FLAGS_RSS;
3133
3134         en_flags &= adapter->if_cap_flags;
3135
3136         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3137                                   &adapter->if_handle, 0);
3138         if (status != 0)
3139                 goto err;
3140
3141         memset(mac, 0, ETH_ALEN);
3142         active_mac = false;
3143         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3144                                  &active_mac, &adapter->pmac_id[0]);
3145         if (status != 0)
3146                 goto err;
3147
3148         if (!active_mac) {
3149                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3150                                          &adapter->pmac_id[0], 0);
3151                 if (status != 0)
3152                         goto err;
3153         }
3154
3155         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3156                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3157                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3158         }
3159
3160         status = be_tx_qs_create(adapter);
3161         if (status)
3162                 goto err;
3163
3164         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3165
3166         if (adapter->vlans_added)
3167                 be_vid_config(adapter);
3168
3169         be_set_rx_mode(adapter->netdev);
3170
3171         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3172
3173         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3174                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3175                                         adapter->rx_fc);
3176
3177         if (be_physfn(adapter) && num_vfs) {
3178                 if (adapter->dev_num_vfs)
3179                         be_vf_setup(adapter);
3180                 else
3181                         dev_warn(dev, "device doesn't support SRIOV\n");
3182         }
3183
3184         status = be_cmd_get_phy_info(adapter);
3185         if (!status && be_pause_supported(adapter))
3186                 adapter->phy.fc_autoneg = 1;
3187
3188         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3189         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3190         return 0;
3191 err:
3192         be_clear(adapter);
3193         return status;
3194 }
3195
3196 #ifdef CONFIG_NET_POLL_CONTROLLER
3197 static void be_netpoll(struct net_device *netdev)
3198 {
3199         struct be_adapter *adapter = netdev_priv(netdev);
3200         struct be_eq_obj *eqo;
3201         int i;
3202
3203         for_all_evt_queues(adapter, eqo, i) {
3204                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3205                 napi_schedule(&eqo->napi);
3206         }
3209 }
3210 #endif
3211
3212 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3213 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3214
3215 static bool be_flash_redboot(struct be_adapter *adapter,
3216                         const u8 *p, u32 img_start, int image_size,
3217                         int hdr_size)
3218 {
3219         u32 crc_offset;
3220         u8 flashed_crc[4];
3221         int status;
3222
3223         crc_offset = hdr_size + img_start + image_size - 4;
3224
3225         p += crc_offset;
3226
3227         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3228                                       (image_size - 4));
3229         if (status) {
3230                 dev_err(&adapter->pdev->dev,
3231                         "could not get CRC from flash; not flashing redboot\n");
3232                 return false;
3233         }
3234
3235         /* update redboot only if the CRC does not match */
3236         return memcmp(flashed_crc, p, 4) != 0;
3240 }
3241
3242 static bool phy_flashing_required(struct be_adapter *adapter)
3243 {
3244         return (adapter->phy.phy_type == TN_8022 &&
3245                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3246 }
3247
3248 static bool is_comp_in_ufi(struct be_adapter *adapter,
3249                            struct flash_section_info *fsec, int type)
3250 {
3251         int i = 0, img_type = 0;
3252         struct flash_section_info_g2 *fsec_g2 = NULL;
3253
3254         if (BE2_chip(adapter))
3255                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3256
3257         for (i = 0; i < MAX_FLASH_COMP; i++) {
3258                 if (fsec_g2)
3259                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3260                 else
3261                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3262
3263                 if (img_type == type)
3264                         return true;
3265         }
3266         return false;
3267 }
3269
3270 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3271                                                 int header_size,
3272                                                 const struct firmware *fw)
3273 {
3274         struct flash_section_info *fsec = NULL;
3275         const u8 *p = fw->data;
3276
3277         p += header_size;
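        /* The flash-directory cookie is assumed to sit on a 32-byte
         * boundary somewhere after the file header(s); scan for it in
         * 32-byte strides.
         */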
3278         while (p < (fw->data + fw->size)) {
3279                 fsec = (struct flash_section_info *)p;
3280                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3281                         return fsec;
3282                 p += 32;
3283         }
3284         return NULL;
3285 }
3286
3287 static int be_flash(struct be_adapter *adapter, const u8 *img,
3288                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3289 {
3290         u32 total_bytes = 0, flash_op, num_bytes = 0;
3291         int status = 0;
3292         struct be_cmd_write_flashrom *req = flash_cmd->va;
3293
3294         total_bytes = img_size;
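        /* Write the image in 32KB chunks: intermediate chunks use a
         * SAVE op (staged by the FW) and only the final chunk issues
         * the actual FLASH (or PHY_FLASH) op.
         */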
3295         while (total_bytes) {
3296                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3297
3298                 total_bytes -= num_bytes;
3299
3300                 if (!total_bytes) {
3301                         if (optype == OPTYPE_PHY_FW)
3302                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3303                         else
3304                                 flash_op = FLASHROM_OPER_FLASH;
3305                 } else {
3306                         if (optype == OPTYPE_PHY_FW)
3307                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3308                         else
3309                                 flash_op = FLASHROM_OPER_SAVE;
3310                 }
3311
3312                 memcpy(req->data_buf, img, num_bytes);
3313                 img += num_bytes;
3314                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3315                                                 flash_op, num_bytes);
3316                 if (status) {
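                        /* PHY FW flashing is optional: if the FW rejects
                         * it as an illegal request, skip it quietly.
                         */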
3317                         if (status == ILLEGAL_IOCTL_REQ &&
3318                             optype == OPTYPE_PHY_FW)
3319                                 break;
3320                         dev_err(&adapter->pdev->dev,
3321                                 "cmd to write to flash rom failed.\n");
3322                         return status;
3323                 }
3324         }
3325         return 0;
3326 }
3327
3328 /* For BE2, BE3 and BE3-R */
3329 static int be_flash_BEx(struct be_adapter *adapter,
3330                          const struct firmware *fw,
3331                          struct be_dma_mem *flash_cmd,
3332                          int num_of_images)
3333 {
3335         int status = 0, i, filehdr_size = 0;
3336         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3337         const u8 *p = fw->data;
3338         const struct flash_comp *pflashcomp;
3339         int num_comp, redboot;
3340         struct flash_section_info *fsec = NULL;
3341
3342         static const struct flash_comp gen3_flash_types[] = {
3343                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3344                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3345                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3346                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3347                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3348                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3349                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3350                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3351                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3352                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3353                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3354                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3355                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3356                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3357                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3358                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3359                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3360                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3361                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3362                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3363         };
3364
3365         static const struct flash_comp gen2_flash_types[] = {
3366                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3367                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3368                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3369                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3370                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3371                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3372                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3373                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3374                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3375                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3376                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3377                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3378                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3379                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3380                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3381                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3382         };
3383
3384         if (BE3_chip(adapter)) {
3385                 pflashcomp = gen3_flash_types;
3386                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3387                 num_comp = ARRAY_SIZE(gen3_flash_types);
3388         } else {
3389                 pflashcomp = gen2_flash_types;
3390                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3391                 num_comp = ARRAY_SIZE(gen2_flash_types);
3392         }
3393
3394         /* Get flash section info */
3395         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3396         if (!fsec) {
3397                 dev_err(&adapter->pdev->dev,
3398                         "Invalid Cookie. UFI corrupted?\n");
3399                 return -1;
3400         }
3401         for (i = 0; i < num_comp; i++) {
3402                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3403                         continue;
3404
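                /* Going by this version-string compare, the NCSI image is
                 * flashed only when the running FW is 3.102.148.0 or newer.
                 */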
3405                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3406                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3407                         continue;
3408
3409                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3410                     !phy_flashing_required(adapter))
3411                         continue;
3412
3413                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3414                         redboot = be_flash_redboot(adapter, fw->data,
3415                                 pflashcomp[i].offset, pflashcomp[i].size,
3416                                 filehdr_size + img_hdrs_size);
3417                         if (!redboot)
3418                                 continue;
3419                 }
3420
3421                 p = fw->data;
3422                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3423                 if (p + pflashcomp[i].size > fw->data + fw->size)
3424                         return -1;
3425
3426                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3427                                         pflashcomp[i].size);
3428                 if (status) {
3429                         dev_err(&adapter->pdev->dev,
3430                                 "Flashing section type %d failed.\n",
3431                                 pflashcomp[i].img_type);
3432                         return status;
3433                 }
3434         }
3435         return 0;
3436 }
3437
3438 static int be_flash_skyhawk(struct be_adapter *adapter,
3439                 const struct firmware *fw,
3440                 struct be_dma_mem *flash_cmd, int num_of_images)
3441 {
3442         int status = 0, i, filehdr_size = 0;
3443         int img_offset, img_size, img_optype, redboot;
3444         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3445         const u8 *p = fw->data;
3446         struct flash_section_info *fsec = NULL;
3447
3448         filehdr_size = sizeof(struct flash_file_hdr_g3);
3449         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3450         if (!fsec) {
3451                 dev_err(&adapter->pdev->dev,
3452                         "Invalid Cookie. UFI corrupted?\n");
3453                 return -1;
3454         }
3455
3456         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3457                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3458                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3459
3460                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3461                 case IMAGE_FIRMWARE_iSCSI:
3462                         img_optype = OPTYPE_ISCSI_ACTIVE;
3463                         break;
3464                 case IMAGE_BOOT_CODE:
3465                         img_optype = OPTYPE_REDBOOT;
3466                         break;
3467                 case IMAGE_OPTION_ROM_ISCSI:
3468                         img_optype = OPTYPE_BIOS;
3469                         break;
3470                 case IMAGE_OPTION_ROM_PXE:
3471                         img_optype = OPTYPE_PXE_BIOS;
3472                         break;
3473                 case IMAGE_OPTION_ROM_FCoE:
3474                         img_optype = OPTYPE_FCOE_BIOS;
3475                         break;
3476                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3477                         img_optype = OPTYPE_ISCSI_BACKUP;
3478                         break;
3479                 case IMAGE_NCSI:
3480                         img_optype = OPTYPE_NCSI_FW;
3481                         break;
3482                 default:
3483                         continue;
3484                 }
3485
3486                 if (img_optype == OPTYPE_REDBOOT) {
3487                         redboot = be_flash_redboot(adapter, fw->data,
3488                                         img_offset, img_size,
3489                                         filehdr_size + img_hdrs_size);
3490                         if (!redboot)
3491                                 continue;
3492                 }
3493
3494                 p = fw->data;
3495                 p += filehdr_size + img_offset + img_hdrs_size;
3496                 if (p + img_size > fw->data + fw->size)
3497                         return -1;
3498
3499                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3500                 if (status) {
3501                         dev_err(&adapter->pdev->dev,
3502                                 "Flashing section type %d failed.\n",
3503                                 le32_to_cpu(fsec->fsec_entry[i].type));
3504                         return status;
3505                 }
3506         }
3507         return 0;
3508 }
3509
3510 static int lancer_wait_idle(struct be_adapter *adapter)
3511 {
3512 #define SLIPORT_IDLE_TIMEOUT 30
3513         u32 reg_val;
3514         int status = 0, i;
3515
3516         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3517                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3518                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3519                         break;
3520
3521                 ssleep(1);
3522         }
3523
3524         if (i == SLIPORT_IDLE_TIMEOUT)
3525                 status = -ETIMEDOUT;
3526
3527         return status;
3528 }
3529
3530 static int lancer_fw_reset(struct be_adapter *adapter)
3531 {
3532         int status = 0;
3533
3534         status = lancer_wait_idle(adapter);
3535         if (status)
3536                 return status;
3537
3538         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3539                   PHYSDEV_CONTROL_OFFSET);
3540
3541         return status;
3542 }
3543
3544 static int lancer_fw_download(struct be_adapter *adapter,
3545                                 const struct firmware *fw)
3546 {
3547 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3548 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3549         struct be_dma_mem flash_cmd;
3550         const u8 *data_ptr = NULL;
3551         u8 *dest_image_ptr = NULL;
3552         size_t image_size = 0;
3553         u32 chunk_size = 0;
3554         u32 data_written = 0;
3555         u32 offset = 0;
3556         int status = 0;
3557         u8 add_status = 0;
3558         u8 change_status;
3559
3560         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3561                 dev_err(&adapter->pdev->dev,
3562                         "FW image size must be 4-byte aligned\n");
3564                 status = -EINVAL;
3565                 goto lancer_fw_exit;
3566         }
3567
3568         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3569                                 + LANCER_FW_DOWNLOAD_CHUNK;
3570         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3571                                           &flash_cmd.dma, GFP_KERNEL);
3572         if (!flash_cmd.va) {
3573                 status = -ENOMEM;
3574                 goto lancer_fw_exit;
3575         }
3576
3577         dest_image_ptr = flash_cmd.va +
3578                                 sizeof(struct lancer_cmd_req_write_object);
3579         image_size = fw->size;
3580         data_ptr = fw->data;
3581
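        /* Push the image to the FW object "/prg" in 32KB chunks; the FW
         * reports how many bytes it consumed, which advances the offset.
         */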
3582         while (image_size) {
3583                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3584
3585                 /* Copy the image chunk content. */
3586                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3587
3588                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3589                                                  chunk_size, offset,
3590                                                  LANCER_FW_DOWNLOAD_LOCATION,
3591                                                  &data_written, &change_status,
3592                                                  &add_status);
3593                 if (status)
3594                         break;
3595
3596                 offset += data_written;
3597                 data_ptr += data_written;
3598                 image_size -= data_written;
3599         }
3600
3601         if (!status) {
3602                 /* Commit the FW written */
3603                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3604                                                  0, offset,
3605                                                  LANCER_FW_DOWNLOAD_LOCATION,
3606                                                  &data_written, &change_status,
3607                                                  &add_status);
3608         }
3609
3610         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3611                                 flash_cmd.dma);
3612         if (status) {
3613                 dev_err(&adapter->pdev->dev,
3614                         "Firmware load error: status 0x%x, additional status 0x%x\n",
3616                         status, add_status);
3617                 goto lancer_fw_exit;
3618         }
3619
3620         if (change_status == LANCER_FW_RESET_NEEDED) {
3621                 status = lancer_fw_reset(adapter);
3622                 if (status) {
3623                         dev_err(&adapter->pdev->dev,
3624                                 "Adapter busy for FW reset.\n"
3625                                 "New FW will not be active.\n");
3626                         goto lancer_fw_exit;
3627                 }
3628         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3629                 dev_err(&adapter->pdev->dev,
3630                         "System reboot required for new FW to be active\n");
3632         }
3633
3634         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3635 lancer_fw_exit:
3636         return status;
3637 }
3638
3639 #define UFI_TYPE2               2
3640 #define UFI_TYPE3               3
3641 #define UFI_TYPE3R              10
3642 #define UFI_TYPE4               4
3643 static int be_get_ufi_type(struct be_adapter *adapter,
3644                            struct flash_file_hdr_g3 *fhdr)
3645 {
3646         if (fhdr == NULL)
3647                 goto be_get_ufi_exit;
3648
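        /* The first character of the UFI build string encodes the chip
         * generation ('2' = BE2, '3' = BE3, '4' = Skyhawk); BE3-R is told
         * apart from plain BE3 by asic_type_rev.
         */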
3649         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3650                 return UFI_TYPE4;
3651         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3652                 if (fhdr->asic_type_rev == 0x10)
3653                         return UFI_TYPE3R;
3654                 else
3655                         return UFI_TYPE3;
3656         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3657                 return UFI_TYPE2;
3658
3659 be_get_ufi_exit:
3660         dev_err(&adapter->pdev->dev,
3661                 "UFI and Interface are not compatible for flashing\n");
3662         return -1;
3663 }
3664
3665 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3666 {
3667         struct flash_file_hdr_g3 *fhdr3;
3668         struct image_hdr *img_hdr_ptr = NULL;
3669         struct be_dma_mem flash_cmd;
3670         const u8 *p;
3671         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3672
3673         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3674         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3675                                           &flash_cmd.dma, GFP_KERNEL);
3676         if (!flash_cmd.va) {
3677                 status = -ENOMEM;
3678                 goto be_fw_exit;
3679         }
3680
3681         p = fw->data;
3682         fhdr3 = (struct flash_file_hdr_g3 *)p;
3683
3684         ufi_type = be_get_ufi_type(adapter, fhdr3);
3685
3686         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3687         for (i = 0; i < num_imgs; i++) {
3688                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3689                                 (sizeof(struct flash_file_hdr_g3) +
3690                                  i * sizeof(struct image_hdr)));
3691                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3692                         switch (ufi_type) {
3693                         case UFI_TYPE4:
3694                                 status = be_flash_skyhawk(adapter, fw,
3695                                                         &flash_cmd, num_imgs);
3696                                 break;
3697                         case UFI_TYPE3R:
3698                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3699                                                       num_imgs);
3700                                 break;
3701                         case UFI_TYPE3:
3702                                 /* Do not flash this ufi on BE3-R cards */
3703                                 if (adapter->asic_rev < 0x10)
3704                                         status = be_flash_BEx(adapter, fw,
3705                                                               &flash_cmd,
3706                                                               num_imgs);
3707                                 else {
3708                                         status = -1;
3709                                         dev_err(&adapter->pdev->dev,
3710                                                 "Can't load BE3 UFI on BE3R\n");
3711                                 }
3712                         }
3713                 }
3714         }
3715
3716         if (ufi_type == UFI_TYPE2)
3717                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3718         else if (ufi_type == -1)
3719                 status = -1;
3720
3721         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3722                           flash_cmd.dma);
3723         if (status) {
3724                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3725                 goto be_fw_exit;
3726         }
3727
3728         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3729
3730 be_fw_exit:
3731         return status;
3732 }
3733
3734 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3735 {
3736         const struct firmware *fw;
3737         int status;
3738
3739         if (!netif_running(adapter->netdev)) {
3740                 dev_err(&adapter->pdev->dev,
3741                         "Firmware load not allowed (interface is down)\n");
3742                 return -ENETDOWN;
3743         }
3744
3745         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3746         if (status)
3747                 goto fw_exit;
3748
3749         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3750
3751         if (lancer_chip(adapter))
3752                 status = lancer_fw_download(adapter, fw);
3753         else
3754                 status = be_fw_download(adapter, fw);
3755
3756 fw_exit:
3757         release_firmware(fw);
3758         return status;
3759 }
3760
3761 static const struct net_device_ops be_netdev_ops = {
3762         .ndo_open               = be_open,
3763         .ndo_stop               = be_close,
3764         .ndo_start_xmit         = be_xmit,
3765         .ndo_set_rx_mode        = be_set_rx_mode,
3766         .ndo_set_mac_address    = be_mac_addr_set,
3767         .ndo_change_mtu         = be_change_mtu,
3768         .ndo_get_stats64        = be_get_stats64,
3769         .ndo_validate_addr      = eth_validate_addr,
3770         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3771         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3772         .ndo_set_vf_mac         = be_set_vf_mac,
3773         .ndo_set_vf_vlan        = be_set_vf_vlan,
3774         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3775         .ndo_get_vf_config      = be_get_vf_config,
3776 #ifdef CONFIG_NET_POLL_CONTROLLER
3777         .ndo_poll_controller    = be_netpoll,
3778 #endif
3779 };
3780
3781 static void be_netdev_init(struct net_device *netdev)
3782 {
3783         struct be_adapter *adapter = netdev_priv(netdev);
3784         struct be_eq_obj *eqo;
3785         int i;
3786
3787         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3788                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3789                 NETIF_F_HW_VLAN_CTAG_TX;
3790         if (be_multi_rxq(adapter))
3791                 netdev->hw_features |= NETIF_F_RXHASH;
3792
3793         netdev->features |= netdev->hw_features |
3794                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3795
3796         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3797                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3798
3799         netdev->priv_flags |= IFF_UNICAST_FLT;
3800
3801         netdev->flags |= IFF_MULTICAST;
3802
3803         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3804
3805         netdev->netdev_ops = &be_netdev_ops;
3806
3807         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3808
3809         for_all_evt_queues(adapter, eqo, i)
3810                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3811 }
3812
3813 static void be_unmap_pci_bars(struct be_adapter *adapter)
3814 {
3815         if (adapter->csr)
3816                 pci_iounmap(adapter->pdev, adapter->csr);
3817         if (adapter->db)
3818                 pci_iounmap(adapter->pdev, adapter->db);
3819 }
3820
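/* On Lancer and on VFs the doorbell region is in BAR 0; on BEx PFs it is
 * in BAR 4.
 */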
3821 static int db_bar(struct be_adapter *adapter)
3822 {
3823         if (lancer_chip(adapter) || !be_physfn(adapter))
3824                 return 0;
3825         else
3826                 return 4;
3827 }
3828
3829 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3830 {
3831         if (skyhawk_chip(adapter)) {
3832                 adapter->roce_db.size = 4096;
3833                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3834                                                               db_bar(adapter));
3835                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3836                                                                db_bar(adapter));
3837         }
3838         return 0;
3839 }
3840
3841 static int be_map_pci_bars(struct be_adapter *adapter)
3842 {
3843         u8 __iomem *addr;
3844         u32 sli_intf;
3845
3846         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3847         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3848                                 SLI_INTF_IF_TYPE_SHIFT;
3849
3850         if (BEx_chip(adapter) && be_physfn(adapter)) {
3851                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3852                 if (adapter->csr == NULL)
3853                         return -ENOMEM;
3854         }
3855
3856         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3857         if (addr == NULL)
3858                 goto pci_map_err;
3859         adapter->db = addr;
3860
3861         be_roce_map_pci_bars(adapter);
3862         return 0;
3863
3864 pci_map_err:
3865         be_unmap_pci_bars(adapter);
3866         return -ENOMEM;
3867 }
3868
3869 static void be_ctrl_cleanup(struct be_adapter *adapter)
3870 {
3871         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3872
3873         be_unmap_pci_bars(adapter);
3874
3875         if (mem->va)
3876                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3877                                   mem->dma);
3878
3879         mem = &adapter->rx_filter;
3880         if (mem->va)
3881                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3882                                   mem->dma);
3883 }
3884
3885 static int be_ctrl_init(struct be_adapter *adapter)
3886 {
3887         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3888         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3889         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3890         u32 sli_intf;
3891         int status;
3892
3893         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3894         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3895                                  SLI_INTF_FAMILY_SHIFT;
3896         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3897
3898         status = be_map_pci_bars(adapter);
3899         if (status)
3900                 goto done;
3901
3902         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3903         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3904                                                 mbox_mem_alloc->size,
3905                                                 &mbox_mem_alloc->dma,
3906                                                 GFP_KERNEL);
3907         if (!mbox_mem_alloc->va) {
3908                 status = -ENOMEM;
3909                 goto unmap_pci_bars;
3910         }
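        /* Bootstrap (mailbox) cmds apparently require 16-byte alignment,
         * so the allocation above is padded by 16 and both the VA and the
         * DMA address are aligned here.
         */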
3911         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3912         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3913         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3914         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3915
3916         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3917         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3918                                            &rx_filter->dma,
3919                                            GFP_KERNEL | __GFP_ZERO);
3920         if (rx_filter->va == NULL) {
3921                 status = -ENOMEM;
3922                 goto free_mbox;
3923         }
3924
3925         mutex_init(&adapter->mbox_lock);
3926         spin_lock_init(&adapter->mcc_lock);
3927         spin_lock_init(&adapter->mcc_cq_lock);
3928
3929         init_completion(&adapter->flash_compl);
3930         pci_save_state(adapter->pdev);
3931         return 0;
3932
3933 free_mbox:
3934         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3935                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3936
3937 unmap_pci_bars:
3938         be_unmap_pci_bars(adapter);
3939
3940 done:
3941         return status;
3942 }
3943
3944 static void be_stats_cleanup(struct be_adapter *adapter)
3945 {
3946         struct be_dma_mem *cmd = &adapter->stats_cmd;
3947
3948         if (cmd->va)
3949                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3950                                   cmd->va, cmd->dma);
3951 }
3952
3953 static int be_stats_init(struct be_adapter *adapter)
3954 {
3955         struct be_dma_mem *cmd = &adapter->stats_cmd;
3956
3957         if (lancer_chip(adapter))
3958                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3959         else if (BE2_chip(adapter))
3960                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3961         else
3962                 /* BE3 and Skyhawk */
3963                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3964
3965         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3966                                      GFP_KERNEL | __GFP_ZERO);
3967         if (cmd->va == NULL)
3968                 return -ENOMEM;
3969         return 0;
3970 }
3971
3972 static void be_remove(struct pci_dev *pdev)
3973 {
3974         struct be_adapter *adapter = pci_get_drvdata(pdev);
3975
3976         if (!adapter)
3977                 return;
3978
3979         be_roce_dev_remove(adapter);
3980         be_intr_set(adapter, false);
3981
3982         cancel_delayed_work_sync(&adapter->func_recovery_work);
3983
3984         unregister_netdev(adapter->netdev);
3985
3986         be_clear(adapter);
3987
3988         /* tell fw we're done with firing cmds */
3989         be_cmd_fw_clean(adapter);
3990
3991         be_stats_cleanup(adapter);
3992
3993         be_ctrl_cleanup(adapter);
3994
3995         pci_disable_pcie_error_reporting(pdev);
3996
3997         pci_set_drvdata(pdev, NULL);
3998         pci_release_regions(pdev);
3999         pci_disable_device(pdev);
4000
4001         free_netdev(adapter->netdev);
4002 }
4003
4004 bool be_is_wol_supported(struct be_adapter *adapter)
4005 {
4006         return (adapter->wol_cap & BE_WOL_CAP) &&
4007                !be_is_wol_excluded(adapter);
4008 }
4009
4010 u32 be_get_fw_log_level(struct be_adapter *adapter)
4011 {
4012         struct be_dma_mem extfat_cmd;
4013         struct be_fat_conf_params *cfgs;
4014         int status;
4015         u32 level = 0;
4016         int j;
4017
4018         if (lancer_chip(adapter))
4019                 return 0;
4020
4021         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4022         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4023         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4024                                              &extfat_cmd.dma);
4025
4026         if (!extfat_cmd.va) {
4027                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4028                         __func__);
4029                 goto err;
4030         }
4031
4032         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4033         if (!status) {
4034                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4035                                                 sizeof(struct be_cmd_resp_hdr));
4036                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4037                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4038                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4039                 }
4040         }
4041         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4042                             extfat_cmd.dma);
4043 err:
4044         return level;
4045 }
4046
4047 static int be_get_initial_config(struct be_adapter *adapter)
4048 {
4049         int status;
4050         u32 level;
4051
4052         status = be_cmd_get_cntl_attributes(adapter);
4053         if (status)
4054                 return status;
4055
4056         status = be_cmd_get_acpi_wol_cap(adapter);
4057         if (status) {
4058                 /* in case of a failure to get WoL capabilities,
4059                  * check the exclusion list to determine WOL capability */
4060                 if (!be_is_wol_excluded(adapter))
4061                         adapter->wol_cap |= BE_WOL_CAP;
4062         }
4063
4064         if (be_is_wol_supported(adapter))
4065                 adapter->wol = true;
4066
4067         /* Must be a power of 2 or else MODULO will BUG_ON */
4068         adapter->be_get_temp_freq = 64;
4069
4070         level = be_get_fw_log_level(adapter);
4071         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4072
4073         return 0;
4074 }
4075
4076 static int lancer_recover_func(struct be_adapter *adapter)
4077 {
4078         int status;
4079
4080         status = lancer_test_and_set_rdy_state(adapter);
4081         if (status)
4082                 goto err;
4083
4084         if (netif_running(adapter->netdev))
4085                 be_close(adapter->netdev);
4086
4087         be_clear(adapter);
4088
4089         adapter->hw_error = false;
4090         adapter->fw_timeout = false;
4091
4092         status = be_setup(adapter);
4093         if (status)
4094                 goto err;
4095
4096         if (netif_running(adapter->netdev)) {
4097                 status = be_open(adapter->netdev);
4098                 if (status)
4099                         goto err;
4100         }
4101
4102         dev_info(&adapter->pdev->dev,
4103                  "Adapter SLIPORT recovery succeeded\n");
4104         return 0;
4105 err:
4106         if (adapter->eeh_error)
4107                 dev_err(&adapter->pdev->dev,
4108                         "Adapter SLIPORT recovery failed\n");
4109
4110         return status;
4111 }
4112
4113 static void be_func_recovery_task(struct work_struct *work)
4114 {
4115         struct be_adapter *adapter =
4116                 container_of(work, struct be_adapter,  func_recovery_work.work);
4117         int status;
4118
4119         be_detect_error(adapter);
4120
4121         if (adapter->hw_error && lancer_chip(adapter)) {
4123                 if (adapter->eeh_error)
4124                         goto out;
4125
4126                 rtnl_lock();
4127                 netif_device_detach(adapter->netdev);
4128                 rtnl_unlock();
4129
4130                 status = lancer_recover_func(adapter);
4131
4132                 if (!status)
4133                         netif_device_attach(adapter->netdev);
4134         }
4135
4136 out:
4137         schedule_delayed_work(&adapter->func_recovery_work,
4138                               msecs_to_jiffies(1000));
4139 }
4140
4141 static void be_worker(struct work_struct *work)
4142 {
4143         struct be_adapter *adapter =
4144                 container_of(work, struct be_adapter, work.work);
4145         struct be_rx_obj *rxo;
4146         struct be_eq_obj *eqo;
4147         int i;
4148
4149         /* when interrupts are not yet enabled, just reap any pending
4150          * mcc completions */
4151         if (!netif_running(adapter->netdev)) {
4152                 local_bh_disable();
4153                 be_process_mcc(adapter);
4154                 local_bh_enable();
4155                 goto reschedule;
4156         }
4157
4158         if (!adapter->stats_cmd_sent) {
4159                 if (lancer_chip(adapter))
4160                         lancer_cmd_get_pport_stats(adapter,
4161                                                 &adapter->stats_cmd);
4162                 else
4163                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4164         }
4165
4166         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4167                 be_cmd_get_die_temperature(adapter);
4168
4169         for_all_rx_queues(adapter, rxo, i) {
4170                 if (rxo->rx_post_starved) {
4171                         rxo->rx_post_starved = false;
4172                         be_post_rx_frags(rxo, GFP_KERNEL);
4173                 }
4174         }
4175
4176         for_all_evt_queues(adapter, eqo, i)
4177                 be_eqd_update(adapter, eqo);
4178
4179 reschedule:
4180         adapter->work_counter++;
4181         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4182 }
4183
4184 static bool be_reset_required(struct be_adapter *adapter)
4185 {
4186         return be_find_vfs(adapter, ENABLED) <= 0;
4187 }
4188
4189 static char *mc_name(struct be_adapter *adapter)
4190 {
4191         if (adapter->function_mode & FLEX10_MODE)
4192                 return "FLEX10";
4193         else if (adapter->function_mode & VNIC_MODE)
4194                 return "vNIC";
4195         else if (adapter->function_mode & UMC_ENABLED)
4196                 return "UMC";
4197         else
4198                 return "";
4199 }
4200
4201 static inline char *func_name(struct be_adapter *adapter)
4202 {
4203         return be_physfn(adapter) ? "PF" : "VF";
4204 }
4205
4206 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4207 {
4208         int status = 0;
4209         struct be_adapter *adapter;
4210         struct net_device *netdev;
4211         char port_name;
4212
4213         status = pci_enable_device(pdev);
4214         if (status)
4215                 goto do_none;
4216
4217         status = pci_request_regions(pdev, DRV_NAME);
4218         if (status)
4219                 goto disable_dev;
4220         pci_set_master(pdev);
4221
4222         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4223         if (netdev == NULL) {
4224                 status = -ENOMEM;
4225                 goto rel_reg;
4226         }
4227         adapter = netdev_priv(netdev);
4228         adapter->pdev = pdev;
4229         pci_set_drvdata(pdev, adapter);
4230         adapter->netdev = netdev;
4231         SET_NETDEV_DEV(netdev, &pdev->dev);
4232
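        /* Prefer a 64-bit DMA mask and fall back to 32-bit if the
         * platform cannot provide it.
         */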
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (status < 0) {
                        dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
                        goto free_netdev;
                }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;

                /* Wait for interrupts to quiesce after an FLR */
                msleep(100);
        }

        /* Allow interrupts for other ULPs running on NIC function */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

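/* PM suspend: arm WoL if configured, quiesce the interface and power the
 * device down.
 */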
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

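/* PM resume: power the device back up, re-init the FW command path and
 * re-create the resources torn down in be_suspend().
 */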
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

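/* EEH/AER error_detected callback: quiesce the device and tell the PCI
 * error-recovery core whether a slot reset may recover it.
 */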
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

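/* slot_reset callback: the slot has been reset; re-enable the device and
 * wait for FW readiness before reporting that recovery can proceed.
 */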
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

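/* resume callback: recovery is complete and traffic may restart; redo FW
 * init and device setup, then re-attach the netdev.
 */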
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

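/* Validate module parameters before registering the PCI driver; e.g. with
 * "modprobe be2net rx_frag_size=4096", any value other than 2048/4096/8192
 * falls back to 2048 with a warning.
 */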
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                       " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);