/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

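/* be_queue_free()/be_queue_alloc() manage the DMA-coherent memory
 * (len * entry_size bytes) that backs a be_queue_info ring.
 */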
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

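/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register, accessed via PCI config space.
 */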
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

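/* Doorbell helpers: each rings a queue doorbell to tell the adapter how
 * many entries were posted (or popped); the wmb() before the RQ/TXQ
 * doorbells orders the queue-entry writes ahead of the doorbell write.
 */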
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

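/* ndo_set_mac_address handler: programs the new address as a pmac entry
 * on the interface and removes the previously active one; for a BE VF
 * only the netdev copy is updated, as the PF owns the MAC.
 */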
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user is passing the same MAC that was
         * used while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                if (!status)
                        status = -EPERM;
                goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);
        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

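/* Copy the v0-format (BE2) HW stats returned by FW into the driver's
 * be_drv_stats block; populate_be_v1_stats() does the same for the
 * v1 layout used by BE3/Skyhawk.
 */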
static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

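/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * accumulator: the low 16 bits mirror the HW value, the high 16 bits
 * count the observed wrap-arounds.
 */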
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* this erx HW counter can actually wrap around after
                         * 65535; the driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

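/* ndo_get_stats64 handler: totals the per-queue SW counters (read under
 * their u64_stats seqcount for consistent 64-bit values) and derives the
 * error totals from the FW stats parsed by be_parse_stats().
 */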
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

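/* Fill one fragment WRB with the DMA address and length of a Tx buffer */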
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

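/* DMA-map the skb's linear part and page frags and post one fragment WRB
 * for each, after the header WRB reserved up front; returns the number of
 * bytes queued, or 0 after unwinding the mappings on a DMA-mapping error.
 */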
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;

dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

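/* Insert the VLAN tag (and the QnQ outer tag, if configured) into the
 * packet data itself, for the cases where HW VLAN tagging must be
 * skipped.
 */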
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        /* Only fetch the tag here; it is inserted (once) below, so the
         * packet does not get tagged twice.
         */
        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

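/* ndo_start_xmit handler: applies the padding/VLAN HW workarounds, builds
 * the WRBs for the skb, stops the subqueue if it may not fit another skb,
 * and rings the TX doorbell.
 */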
static netdev_tx_t be_xmit(struct sk_buff *skb,
                           struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;
        bool skip_hw_vlan = false;
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         */
        if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

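/* ndo_set_rx_mode handler: programs promiscuous/multicast filtering and
 * re-syncs the unicast MAC list with the HW pmac entries.
 */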
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                          int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                             int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

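/* Count this PF's virtual functions by walking the PCI device list;
 * returns either all VFs or only those assigned to a guest, depending
 * on vf_state.
 */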
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

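/* Adaptive interrupt coalescing: once a second, recompute the EQ delay
 * from the observed RX packet rate and program it if it changed.
 */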
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats;
        ulong now = jiffies;
        ulong delta;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        delta = now - stats->rx_jiffies;
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

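/* Consume one RX frag from the queue; the backing page is unmapped when
 * its last user is consumed.
 */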
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

1356 /*
1357  * skb_fill_rx_data forms a complete skb for an ether frame
1358  * indicated by rxcp.
1359  */
1360 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1361                              struct be_rx_compl_info *rxcp)
1362 {
1363         struct be_queue_info *rxq = &rxo->q;
1364         struct be_rx_page_info *page_info;
1365         u16 i, j;
1366         u16 hdr_len, curr_frag_len, remaining;
1367         u8 *start;
1368
1369         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1370         start = page_address(page_info->page) + page_info->page_offset;
1371         prefetch(start);
1372
1373         /* Copy data in the first descriptor of this completion */
1374         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1375
1376         skb->len = curr_frag_len;
1377         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1378                 memcpy(skb->data, start, curr_frag_len);
1379                 /* Complete packet has now been moved to data */
1380                 put_page(page_info->page);
1381                 skb->data_len = 0;
1382                 skb->tail += curr_frag_len;
1383         } else {
1384                 hdr_len = ETH_HLEN;
1385                 memcpy(skb->data, start, hdr_len);
1386                 skb_shinfo(skb)->nr_frags = 1;
1387                 skb_frag_set_page(skb, 0, page_info->page);
1388                 skb_shinfo(skb)->frags[0].page_offset =
1389                                         page_info->page_offset + hdr_len;
1390                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1391                 skb->data_len = curr_frag_len - hdr_len;
1392                 skb->truesize += rx_frag_size;
1393                 skb->tail += hdr_len;
1394         }
1395         page_info->page = NULL;
1396
1397         if (rxcp->pkt_size <= rx_frag_size) {
1398                 BUG_ON(rxcp->num_rcvd != 1);
1399                 return;
1400         }
1401
1402         /* More frags present for this completion */
1403         index_inc(&rxcp->rxq_idx, rxq->len);
1404         remaining = rxcp->pkt_size - curr_frag_len;
1405         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1406                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1407                 curr_frag_len = min(remaining, rx_frag_size);
1408
1409                 /* Coalesce all frags from the same physical page in one slot */
1410                 if (page_info->page_offset == 0) {
1411                         /* Fresh page */
1412                         j++;
1413                         skb_frag_set_page(skb, j, page_info->page);
1414                         skb_shinfo(skb)->frags[j].page_offset =
1415                                                         page_info->page_offset;
1416                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1417                         skb_shinfo(skb)->nr_frags++;
1418                 } else {
1419                         put_page(page_info->page);
1420                 }
1421
1422                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1423                 skb->len += curr_frag_len;
1424                 skb->data_len += curr_frag_len;
1425                 skb->truesize += rx_frag_size;
1426                 remaining -= curr_frag_len;
1427                 index_inc(&rxcp->rxq_idx, rxq->len);
1428                 page_info->page = NULL;
1429         }
1430         BUG_ON(j > MAX_SKB_FRAGS);
1431 }
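
/* Editor's note: skb_fill_rx_data() implements a header-copybreak
 * strategy.  Packets of at most BE_HDR_LEN bytes are memcpy'd whole into
 * the skb's linear area so the page can be released immediately; for
 * anything larger only the Ethernet header is copied and the payload
 * stays in page fragments.  Worked example (illustrative values): with
 * rx_frag_size = 2048 and pkt_size = 3000, frag 0 carries
 * 2048 - ETH_HLEN = 2034 payload bytes after the 14-byte header copy,
 * and the loop above attaches the remaining 952 bytes from the next
 * fragment.
 */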
1432
1433 /* Process the RX completion indicated by rxcp when GRO is disabled */
1434 static void be_rx_compl_process(struct be_rx_obj *rxo,
1435                                 struct be_rx_compl_info *rxcp)
1436 {
1437         struct be_adapter *adapter = rxo->adapter;
1438         struct net_device *netdev = adapter->netdev;
1439         struct sk_buff *skb;
1440
1441         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1442         if (unlikely(!skb)) {
1443                 rx_stats(rxo)->rx_drops_no_skbs++;
1444                 be_rx_compl_discard(rxo, rxcp);
1445                 return;
1446         }
1447
1448         skb_fill_rx_data(rxo, skb, rxcp);
1449
1450         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1451                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1452         else
1453                 skb_checksum_none_assert(skb);
1454
1455         skb->protocol = eth_type_trans(skb, netdev);
1456         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1457         if (netdev->features & NETIF_F_RXHASH)
1458                 skb->rxhash = rxcp->rss_hash;
1459
1460
1461         if (rxcp->vlanf)
1462                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1463
1464         netif_receive_skb(skb);
1465 }
1466
1467 /* Process the RX completion indicated by rxcp when GRO is enabled */
1468 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1469                              struct be_rx_compl_info *rxcp)
1470 {
1471         struct be_adapter *adapter = rxo->adapter;
1472         struct be_rx_page_info *page_info;
1473         struct sk_buff *skb = NULL;
1474         struct be_queue_info *rxq = &rxo->q;
1475         u16 remaining, curr_frag_len;
1476         u16 i, j;
1477
1478         skb = napi_get_frags(napi);
1479         if (!skb) {
1480                 be_rx_compl_discard(rxo, rxcp);
1481                 return;
1482         }
1483
1484         remaining = rxcp->pkt_size;
1485         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1486                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1487
1488                 curr_frag_len = min(remaining, rx_frag_size);
1489
1490                 /* Coalesce all frags from the same physical page in one slot */
1491                 if (i == 0 || page_info->page_offset == 0) {
1492                         /* First frag or Fresh page */
1493                         j++;
1494                         skb_frag_set_page(skb, j, page_info->page);
1495                         skb_shinfo(skb)->frags[j].page_offset =
1496                                                         page_info->page_offset;
1497                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1498                 } else {
1499                         put_page(page_info->page);
1500                 }
1501                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1502                 skb->truesize += rx_frag_size;
1503                 remaining -= curr_frag_len;
1504                 index_inc(&rxcp->rxq_idx, rxq->len);
1505                 memset(page_info, 0, sizeof(*page_info));
1506         }
1507         BUG_ON(j > MAX_SKB_FRAGS);
1508
1509         skb_shinfo(skb)->nr_frags = j + 1;
1510         skb->len = rxcp->pkt_size;
1511         skb->data_len = rxcp->pkt_size;
1512         skb->ip_summed = CHECKSUM_UNNECESSARY;
1513         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1514         if (adapter->netdev->features & NETIF_F_RXHASH)
1515                 skb->rxhash = rxcp->rss_hash;
1516
1517         if (rxcp->vlanf)
1518                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1519
1520         napi_gro_frags(napi);
1521 }
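
/* Editor's note: the GRO path follows the napi_get_frags()/
 * napi_gro_frags() contract: the skb is owned by NAPI, the driver only
 * attaches page fragments, and napi_gro_frags() itself pulls the
 * Ethernet header out of frag 0.  Note also that j is a u16 initialized
 * to -1 above; it wraps to 0xffff and the increment in the "fresh page"
 * branch brings it to 0 before frags[j] is first written.
 */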
1522
1523 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1524                                  struct be_rx_compl_info *rxcp)
1525 {
1526         rxcp->pkt_size =
1527                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1528         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1529         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1530         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1531         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1532         rxcp->ip_csum =
1533                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1534         rxcp->l4_csum =
1535                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1536         rxcp->ipv6 =
1537                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1538         rxcp->rxq_idx =
1539                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1540         rxcp->num_rcvd =
1541                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1542         rxcp->pkt_type =
1543                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1544         rxcp->rss_hash =
1545                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1546         if (rxcp->vlanf) {
1547                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1548                                           compl);
1549                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1550                                                compl);
1551         }
1552         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1553 }
1554
1555 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1556                                  struct be_rx_compl_info *rxcp)
1557 {
1558         rxcp->pkt_size =
1559                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1560         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1561         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1562         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1563         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1564         rxcp->ip_csum =
1565                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1566         rxcp->l4_csum =
1567                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1568         rxcp->ipv6 =
1569                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1570         rxcp->rxq_idx =
1571                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1572         rxcp->num_rcvd =
1573                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1574         rxcp->pkt_type =
1575                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1576         rxcp->rss_hash =
1577                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1578         if (rxcp->vlanf) {
1579                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1580                                           compl);
1581                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1582                                                compl);
1583         }
1584         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1585 }
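
/* Editor's note: both parsers above are driven by AMAP_GET_BITS, which
 * extracts a named bitfield from the little-endian completion using the
 * amap_eth_rx_compl_v* layout structs.  An illustrative sketch of the
 * underlying operation, assuming the field does not straddle a 32-bit
 * word (the real AMAP helpers also handle the straddling case):
 */
#if 0
static u32 extract_field_sketch(const u32 *dw, int bit_offset, int width)
{
        u32 word = dw[bit_offset / 32];
        u32 mask = (width == 32) ? ~0u : ((1u << width) - 1);

        return (word >> (bit_offset % 32)) & mask;
}
#endif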
1586
1587 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1588 {
1589         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1590         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1591         struct be_adapter *adapter = rxo->adapter;
1592
1593         /* For checking the valid bit it is OK to use either definition as the
1594          * valid bit is at the same position in both v0 and v1 Rx compl */
1595         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1596                 return NULL;
1597
1598         rmb();
1599         be_dws_le_to_cpu(compl, sizeof(*compl));
1600
1601         if (adapter->be3_native)
1602                 be_parse_rx_compl_v1(compl, rxcp);
1603         else
1604                 be_parse_rx_compl_v0(compl, rxcp);
1605
1606         if (rxcp->vlanf) {
1607                 /* vlanf could be wrongly set in some cards.
1608                  * ignore if vtm is not set */
1609                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1610                         rxcp->vlanf = 0;
1611
1612                 if (!lancer_chip(adapter))
1613                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1614
1615                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1616                     !adapter->vlan_tag[rxcp->vlan_tag])
1617                         rxcp->vlanf = 0;
1618         }
1619
1620         /* As the compl has been parsed, reset it; we won't touch it again */
1621         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1622
1623         queue_tail_inc(&rxo->cq);
1624         return rxcp;
1625 }
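
/* Editor's note: be_rx_compl_get() is the canonical valid-bit consumer
 * pattern: test the valid bit first, then issue rmb() so reads of the
 * completion body cannot be speculated ahead of the valid-bit check,
 * and finally clear the bit so the entry is never consumed twice when
 * the CQ wraps.
 */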
1626
1627 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1628 {
1629         u32 order = get_order(size);
1630
1631         if (order > 0)
1632                 gfp |= __GFP_COMP;
1633         return alloc_pages(gfp, order);
1634 }
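
/* Editor's note: __GFP_COMP is added for order > 0 so the allocation
 * becomes a compound page.  That keeps the multi-order buffer refcounted
 * as a single unit, which the rx_frag_size slicing and the
 * get_page()/put_page() accounting in be_post_rx_frags() below rely on.
 */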
1635
1636 /*
1637  * Allocate a page, split it to fragments of size rx_frag_size and post as
1638  * receive buffers to BE
1639  */
1640 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1641 {
1642         struct be_adapter *adapter = rxo->adapter;
1643         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1644         struct be_queue_info *rxq = &rxo->q;
1645         struct page *pagep = NULL;
1646         struct be_eth_rx_d *rxd;
1647         u64 page_dmaaddr = 0, frag_dmaaddr;
1648         u32 posted, page_offset = 0;
1649
1650         page_info = &rxo->page_info_tbl[rxq->head];
1651         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1652                 if (!pagep) {
1653                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1654                         if (unlikely(!pagep)) {
1655                                 rx_stats(rxo)->rx_post_fail++;
1656                                 break;
1657                         }
1658                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1659                                                     0, adapter->big_page_size,
1660                                                     DMA_FROM_DEVICE);
1661                         page_info->page_offset = 0;
1662                 } else {
1663                         get_page(pagep);
1664                         page_info->page_offset = page_offset + rx_frag_size;
1665                 }
1666                 page_offset = page_info->page_offset;
1667                 page_info->page = pagep;
1668                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1669                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1670
1671                 rxd = queue_head_node(rxq);
1672                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1673                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1674
1675                 /* Any space left in the current big page for another frag? */
1676                 if ((page_offset + rx_frag_size + rx_frag_size) >
1677                                         adapter->big_page_size) {
1678                         pagep = NULL;
1679                         page_info->last_page_user = true;
1680                 }
1681
1682                 prev_page_info = page_info;
1683                 queue_head_inc(rxq);
1684                 page_info = &rxo->page_info_tbl[rxq->head];
1685         }
1686         if (pagep)
1687                 prev_page_info->last_page_user = true;
1688
1689         if (posted) {
1690                 atomic_add(posted, &rxq->used);
1691                 be_rxq_notify(adapter, rxq->id, posted);
1692         } else if (atomic_read(&rxq->used) == 0) {
1693                 /* Let be_worker replenish when memory is available */
1694                 rxo->rx_post_starved = true;
1695         }
1696 }
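
/* Editor's note: one "big page" is diced into rx_frag_size slices.
 * Worked example (illustrative values): with PAGE_SIZE = 4096 and
 * rx_frag_size = 2048, big_page_size is 4096, so each page yields two
 * receive fragments; the first slice maps the page and each further
 * slice takes a get_page() reference.  last_page_user tags the final
 * slice so the unmap side in get_rx_page_info() knows when the last
 * reference has been consumed.
 */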
1697
1698 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1699 {
1700         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1701
1702         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1703                 return NULL;
1704
1705         rmb();
1706         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1707
1708         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1709
1710         queue_tail_inc(tx_cq);
1711         return txcp;
1712 }
1713
1714 static u16 be_tx_compl_process(struct be_adapter *adapter,
1715                 struct be_tx_obj *txo, u16 last_index)
1716 {
1717         struct be_queue_info *txq = &txo->q;
1718         struct be_eth_wrb *wrb;
1719         struct sk_buff **sent_skbs = txo->sent_skb_list;
1720         struct sk_buff *sent_skb;
1721         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1722         bool unmap_skb_hdr = true;
1723
1724         sent_skb = sent_skbs[txq->tail];
1725         BUG_ON(!sent_skb);
1726         sent_skbs[txq->tail] = NULL;
1727
1728         /* skip header wrb */
1729         queue_tail_inc(txq);
1730
1731         do {
1732                 cur_index = txq->tail;
1733                 wrb = queue_tail_node(txq);
1734                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1735                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1736                 unmap_skb_hdr = false;
1737
1738                 num_wrbs++;
1739                 queue_tail_inc(txq);
1740         } while (cur_index != last_index);
1741
1742         kfree_skb(sent_skb);
1743         return num_wrbs;
1744 }
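
/* Editor's note: num_wrbs starts at 1 because every TX packet is
 * preceded by a header WRB that carries no fragment data; the loop then
 * counts one WRB per fragment until last_index (taken from the
 * completion's wrb_index field) is reached, so the caller can credit the
 * exact number of ring entries back against txq->used.
 */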
1745
1746 /* Return the number of events in the event queue */
1747 static inline int events_get(struct be_eq_obj *eqo)
1748 {
1749         struct be_eq_entry *eqe;
1750         int num = 0;
1751
1752         do {
1753                 eqe = queue_tail_node(&eqo->q);
1754                 if (eqe->evt == 0)
1755                         break;
1756
1757                 rmb();
1758                 eqe->evt = 0;
1759                 num++;
1760                 queue_tail_inc(&eqo->q);
1761         } while (true);
1762
1763         return num;
1764 }
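
/* Editor's note: EQ entries are consumed with the same
 * check-then-rmb()-then-clear pattern as the completion queues; zeroing
 * eqe->evt marks the slot free so a wrapped pass over the ring cannot
 * recount a stale event.
 */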
1765
1766 /* Leaves the EQ in disarmed state */
1767 static void be_eq_clean(struct be_eq_obj *eqo)
1768 {
1769         int num = events_get(eqo);
1770
1771         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1772 }
1773
1774 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1775 {
1776         struct be_rx_page_info *page_info;
1777         struct be_queue_info *rxq = &rxo->q;
1778         struct be_queue_info *rx_cq = &rxo->cq;
1779         struct be_rx_compl_info *rxcp;
1780         struct be_adapter *adapter = rxo->adapter;
1781         int flush_wait = 0;
1782         u16 tail;
1783
1784         /* Consume pending rx completions.
1785          * Wait for the flush completion (identified by zero num_rcvd)
1786          * to arrive. Notify CQ even when there are no more CQ entries
1787          * for HW to flush partially coalesced CQ entries.
1788          * In Lancer, there is no need to wait for flush compl.
1789          */
1790         for (;;) {
1791                 rxcp = be_rx_compl_get(rxo);
1792                 if (rxcp == NULL) {
1793                         if (lancer_chip(adapter))
1794                                 break;
1795
1796                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1797                                 dev_warn(&adapter->pdev->dev,
1798                                          "did not receive flush compl\n");
1799                                 break;
1800                         }
1801                         be_cq_notify(adapter, rx_cq->id, true, 0);
1802                         mdelay(1);
1803                 } else {
1804                         be_rx_compl_discard(rxo, rxcp);
1805                         be_cq_notify(adapter, rx_cq->id, true, 1);
1806                         if (rxcp->num_rcvd == 0)
1807                                 break;
1808                 }
1809         }
1810
1811         /* After cleanup, leave the CQ in unarmed state */
1812         be_cq_notify(adapter, rx_cq->id, false, 0);
1813
1814         /* Then free posted rx buffers that were not used */
1815         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1816         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1817                 page_info = get_rx_page_info(rxo, tail);
1818                 put_page(page_info->page);
1819                 memset(page_info, 0, sizeof(*page_info));
1820         }
1821         BUG_ON(atomic_read(&rxq->used));
1822         rxq->tail = rxq->head = 0;
1823 }
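
/* Editor's note on the tail computation above: the oldest still-posted
 * buffer sits "used" entries behind the producer head, hence
 * tail = (head + len - used) % len.  Worked example (illustrative
 * numbers): len = 1024, head = 10, used = 30 gives
 * tail = (10 + 1024 - 30) % 1024 = 1004, i.e. 30 entries before head
 * with wrap-around.
 */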
1824
1825 static void be_tx_compl_clean(struct be_adapter *adapter)
1826 {
1827         struct be_tx_obj *txo;
1828         struct be_queue_info *txq;
1829         struct be_eth_tx_compl *txcp;
1830         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1831         struct sk_buff *sent_skb;
1832         bool dummy_wrb;
1833         int i, pending_txqs;
1834
1835         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1836         do {
1837                 pending_txqs = adapter->num_tx_qs;
1838
1839                 for_all_tx_queues(adapter, txo, i) {
1840                         txq = &txo->q;
1841                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1842                                 end_idx =
1843                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1844                                                       wrb_index, txcp);
1845                                 num_wrbs += be_tx_compl_process(adapter, txo,
1846                                                                 end_idx);
1847                                 cmpl++;
1848                         }
1849                         if (cmpl) {
1850                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1851                                 atomic_sub(num_wrbs, &txq->used);
1852                                 cmpl = 0;
1853                                 num_wrbs = 0;
1854                         }
1855                         if (atomic_read(&txq->used) == 0)
1856                                 pending_txqs--;
1857                 }
1858
1859                 if (pending_txqs == 0 || ++timeo > 200)
1860                         break;
1861
1862                 mdelay(1);
1863         } while (true);
1864
1865         for_all_tx_queues(adapter, txo, i) {
1866                 txq = &txo->q;
1867                 if (atomic_read(&txq->used))
1868                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1869                                 atomic_read(&txq->used));
1870
1871                 /* free posted tx for which compls will never arrive */
1872                 while (atomic_read(&txq->used)) {
1873                         sent_skb = txo->sent_skb_list[txq->tail];
1874                         end_idx = txq->tail;
1875                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1876                                                    &dummy_wrb);
1877                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1878                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1879                         atomic_sub(num_wrbs, &txq->used);
1880                 }
1881         }
1882 }
1883
1884 static void be_evt_queues_destroy(struct be_adapter *adapter)
1885 {
1886         struct be_eq_obj *eqo;
1887         int i;
1888
1889         for_all_evt_queues(adapter, eqo, i) {
1890                 if (eqo->q.created) {
1891                         be_eq_clean(eqo);
1892                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1893                 }
1894                 be_queue_free(adapter, &eqo->q);
1895         }
1896 }
1897
1898 static int be_evt_queues_create(struct be_adapter *adapter)
1899 {
1900         struct be_queue_info *eq;
1901         struct be_eq_obj *eqo;
1902         int i, rc;
1903
1904         adapter->num_evt_qs = num_irqs(adapter);
1905
1906         for_all_evt_queues(adapter, eqo, i) {
1907                 eqo->adapter = adapter;
1908                 eqo->tx_budget = BE_TX_BUDGET;
1909                 eqo->idx = i;
1910                 eqo->max_eqd = BE_MAX_EQD;
1911                 eqo->enable_aic = true;
1912
1913                 eq = &eqo->q;
1914                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1915                                         sizeof(struct be_eq_entry));
1916                 if (rc)
1917                         return rc;
1918
1919                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1920                 if (rc)
1921                         return rc;
1922         }
1923         return 0;
1924 }
1925
1926 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1927 {
1928         struct be_queue_info *q;
1929
1930         q = &adapter->mcc_obj.q;
1931         if (q->created)
1932                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1933         be_queue_free(adapter, q);
1934
1935         q = &adapter->mcc_obj.cq;
1936         if (q->created)
1937                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1938         be_queue_free(adapter, q);
1939 }
1940
1941 /* Must be called only after TX qs are created as MCC shares TX EQ */
1942 static int be_mcc_queues_create(struct be_adapter *adapter)
1943 {
1944         struct be_queue_info *q, *cq;
1945
1946         cq = &adapter->mcc_obj.cq;
1947         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1948                         sizeof(struct be_mcc_compl)))
1949                 goto err;
1950
1951         /* Use the default EQ for MCC completions */
1952         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1953                 goto mcc_cq_free;
1954
1955         q = &adapter->mcc_obj.q;
1956         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1957                 goto mcc_cq_destroy;
1958
1959         if (be_cmd_mccq_create(adapter, q, cq))
1960                 goto mcc_q_free;
1961
1962         return 0;
1963
1964 mcc_q_free:
1965         be_queue_free(adapter, q);
1966 mcc_cq_destroy:
1967         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1968 mcc_cq_free:
1969         be_queue_free(adapter, cq);
1970 err:
1971         return -1;
1972 }
1973
1974 static void be_tx_queues_destroy(struct be_adapter *adapter)
1975 {
1976         struct be_queue_info *q;
1977         struct be_tx_obj *txo;
1978         u8 i;
1979
1980         for_all_tx_queues(adapter, txo, i) {
1981                 q = &txo->q;
1982                 if (q->created)
1983                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1984                 be_queue_free(adapter, q);
1985
1986                 q = &txo->cq;
1987                 if (q->created)
1988                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1989                 be_queue_free(adapter, q);
1990         }
1991 }
1992
1993 static int be_num_txqs_want(struct be_adapter *adapter)
1994 {
1995         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1996             be_is_mc(adapter) ||
1997             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1998             BE2_chip(adapter))
1999                 return 1;
2000         else
2001                 return adapter->max_tx_queues;
2002 }
2003
2004 static int be_tx_cqs_create(struct be_adapter *adapter)
2005 {
2006         struct be_queue_info *cq, *eq;
2007         int status;
2008         struct be_tx_obj *txo;
2009         u8 i;
2010
2011         adapter->num_tx_qs = be_num_txqs_want(adapter);
2012         if (adapter->num_tx_qs != MAX_TX_QS) {
2013                 rtnl_lock();
2014                 netif_set_real_num_tx_queues(adapter->netdev,
2015                         adapter->num_tx_qs);
2016                 rtnl_unlock();
2017         }
2018
2019         for_all_tx_queues(adapter, txo, i) {
2020                 cq = &txo->cq;
2021                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2022                                         sizeof(struct be_eth_tx_compl));
2023                 if (status)
2024                         return status;
2025
2026                 /* If num_evt_qs is less than num_tx_qs, then more than
2027                  * one txq shares an eq
2028                  */
2029                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2030                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2031                 if (status)
2032                         return status;
2033         }
2034         return 0;
2035 }
2036
2037 static int be_tx_qs_create(struct be_adapter *adapter)
2038 {
2039         struct be_tx_obj *txo;
2040         int i, status;
2041
2042         for_all_tx_queues(adapter, txo, i) {
2043                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2044                                         sizeof(struct be_eth_wrb));
2045                 if (status)
2046                         return status;
2047
2048                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2049                 if (status)
2050                         return status;
2051         }
2052
2053         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2054                  adapter->num_tx_qs);
2055         return 0;
2056 }
2057
2058 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2059 {
2060         struct be_queue_info *q;
2061         struct be_rx_obj *rxo;
2062         int i;
2063
2064         for_all_rx_queues(adapter, rxo, i) {
2065                 q = &rxo->cq;
2066                 if (q->created)
2067                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2068                 be_queue_free(adapter, q);
2069         }
2070 }
2071
2072 static int be_rx_cqs_create(struct be_adapter *adapter)
2073 {
2074         struct be_queue_info *eq, *cq;
2075         struct be_rx_obj *rxo;
2076         int rc, i;
2077
2078         /* We'll create as many RSS rings as there are irqs.
2079          * But when there's only one irq, there's no use creating RSS rings
2080          */
2081         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2082                                 num_irqs(adapter) + 1 : 1;
2083         if (adapter->num_rx_qs != MAX_RX_QS) {
2084                 rtnl_lock();
2085                 netif_set_real_num_rx_queues(adapter->netdev,
2086                                              adapter->num_rx_qs);
2087                 rtnl_unlock();
2088         }
2089
2090         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2091         for_all_rx_queues(adapter, rxo, i) {
2092                 rxo->adapter = adapter;
2093                 cq = &rxo->cq;
2094                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2095                                 sizeof(struct be_eth_rx_compl));
2096                 if (rc)
2097                         return rc;
2098
2099                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2100                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2101                 if (rc)
2102                         return rc;
2103         }
2104
2105         dev_info(&adapter->pdev->dev,
2106                  "created %d RSS queue(s) and 1 default RX queue\n",
2107                  adapter->num_rx_qs - 1);
2108         return 0;
2109 }
2110
2111 static irqreturn_t be_intx(int irq, void *dev)
2112 {
2113         struct be_eq_obj *eqo = dev;
2114         struct be_adapter *adapter = eqo->adapter;
2115         int num_evts = 0;
2116
2117         /* IRQ is not expected when NAPI is scheduled as the EQ
2118          * will not be armed.
2119          * But, this can happen on Lancer INTx where it takes
2120          * a while to de-assert INTx or in BE2 where occasionaly
2121          * an interrupt may be raised even when EQ is unarmed.
2122          * If NAPI is already scheduled, then counting & notifying
2123          * events will orphan them.
2124          */
2125         if (napi_schedule_prep(&eqo->napi)) {
2126                 num_evts = events_get(eqo);
2127                 __napi_schedule(&eqo->napi);
2128                 if (num_evts)
2129                         eqo->spurious_intr = 0;
2130         }
2131         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2132
2133         /* Return IRQ_HANDLED only for the first spurious intr
2134          * after a valid intr to stop the kernel from branding
2135          * this irq as a bad one!
2136          */
2137         if (num_evts || eqo->spurious_intr++ == 0)
2138                 return IRQ_HANDLED;
2139         else
2140                 return IRQ_NONE;
2141 }
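
/* Editor's note: the spurious_intr dance exists because the kernel's
 * spurious-IRQ detector disables a line whose handler keeps returning
 * IRQ_NONE.  Reporting IRQ_HANDLED for the first eventless interrupt
 * after a productive one keeps an occasional stray assertion from
 * tripping that detector, while a genuinely stuck line still gets
 * flagged.
 */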
2142
2143 static irqreturn_t be_msix(int irq, void *dev)
2144 {
2145         struct be_eq_obj *eqo = dev;
2146
2147         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2148         napi_schedule(&eqo->napi);
2149         return IRQ_HANDLED;
2150 }
2151
2152 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2153 {
2154         return rxcp->tcpf && !rxcp->err;
2155 }
2156
2157 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2158                         int budget)
2159 {
2160         struct be_adapter *adapter = rxo->adapter;
2161         struct be_queue_info *rx_cq = &rxo->cq;
2162         struct be_rx_compl_info *rxcp;
2163         u32 work_done;
2164
2165         for (work_done = 0; work_done < budget; work_done++) {
2166                 rxcp = be_rx_compl_get(rxo);
2167                 if (!rxcp)
2168                         break;
2169
2170                 /* Is it a flush compl that has no data? */
2171                 if (unlikely(rxcp->num_rcvd == 0))
2172                         goto loop_continue;
2173
2174                 /* Discard compl with partial DMA Lancer B0 */
2175                 if (unlikely(!rxcp->pkt_size)) {
2176                         be_rx_compl_discard(rxo, rxcp);
2177                         goto loop_continue;
2178                 }
2179
2180                 /* On BE drop pkts that arrive due to imperfect filtering in
2181                  * promiscuous mode on some SKUs
2182                  */
2183                 if (unlikely(rxcp->port != adapter->port_num &&
2184                                 !lancer_chip(adapter))) {
2185                         be_rx_compl_discard(rxo, rxcp);
2186                         goto loop_continue;
2187                 }
2188
2189                 if (do_gro(rxcp))
2190                         be_rx_compl_process_gro(rxo, napi, rxcp);
2191                 else
2192                         be_rx_compl_process(rxo, rxcp);
2193 loop_continue:
2194                 be_rx_stats_update(rxo, rxcp);
2195         }
2196
2197         if (work_done) {
2198                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2199
2200                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2201                         be_post_rx_frags(rxo, GFP_ATOMIC);
2202         }
2203
2204         return work_done;
2205 }
2206
2207 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2208                           int budget, int idx)
2209 {
2210         struct be_eth_tx_compl *txcp;
2211         int num_wrbs = 0, work_done;
2212
2213         for (work_done = 0; work_done < budget; work_done++) {
2214                 txcp = be_tx_compl_get(&txo->cq);
2215                 if (!txcp)
2216                         break;
2217                 num_wrbs += be_tx_compl_process(adapter, txo,
2218                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2219                                         wrb_index, txcp));
2220         }
2221
2222         if (work_done) {
2223                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2224                 atomic_sub(num_wrbs, &txo->q.used);
2225
2226                 /* As Tx wrbs have been freed up, wake up netdev queue
2227                  * if it was stopped due to lack of tx wrbs.  */
2228                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2229                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2230                         netif_wake_subqueue(adapter->netdev, idx);
2231                 }
2232
2233                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2234                 tx_stats(txo)->tx_compl += work_done;
2235                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2236         }
2237         return (work_done < budget); /* Done */
2238 }
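
/* Editor's note: sync_compl is a u64_stats_sync seqcount protecting the
 * 64-bit counter on 32-bit SMP hosts.  A sketch of the matching reader
 * side, as a stats-gathering path would use it (illustrative, not a
 * quote from this driver):
 */
#if 0
static u64 read_tx_compl_sketch(struct be_tx_obj *txo)
{
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin(&tx_stats(txo)->sync_compl);
                val = tx_stats(txo)->tx_compl;
        } while (u64_stats_fetch_retry(&tx_stats(txo)->sync_compl, start));

        return val;
}
#endif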
2239
2240 int be_poll(struct napi_struct *napi, int budget)
2241 {
2242         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2243         struct be_adapter *adapter = eqo->adapter;
2244         int max_work = 0, work, i, num_evts;
2245         bool tx_done;
2246
2247         num_evts = events_get(eqo);
2248
2249         /* Process all TXQs serviced by this EQ */
2250         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2251                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2252                                         eqo->tx_budget, i);
2253                 if (!tx_done)
2254                         max_work = budget;
2255         }
2256
2257         /* This loop will iterate twice for EQ0 in which
2258          * completions of the last RXQ (default one) are also processed.
2259          * For other EQs the loop iterates only once.
2260          */
2261         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2262                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2263                 max_work = max(work, max_work);
2264         }
2265
2266         if (is_mcc_eqo(eqo))
2267                 be_process_mcc(adapter);
2268
2269         if (max_work < budget) {
2270                 napi_complete(napi);
2271                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2272         } else {
2273                 /* As we'll continue in polling mode, count and clear events */
2274                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2275         }
2276         return max_work;
2277 }
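
/* Editor's note: be_poll() follows the standard NAPI contract: if all
 * work fit within the budget it calls napi_complete() and re-arms the
 * EQ (arm = true) so the next event raises an interrupt; if the budget
 * was exhausted it leaves the EQ unarmed and merely acks the counted
 * events, and the kernel will poll again.
 */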
2278
2279 void be_detect_error(struct be_adapter *adapter)
2280 {
2281         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2282         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2283         u32 i;
2284
2285         if (be_hw_error(adapter))
2286                 return;
2287
2288         if (lancer_chip(adapter)) {
2289                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2290                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2291                         sliport_err1 = ioread32(adapter->db +
2292                                         SLIPORT_ERROR1_OFFSET);
2293                         sliport_err2 = ioread32(adapter->db +
2294                                         SLIPORT_ERROR2_OFFSET);
2295                 }
2296         } else {
2297                 pci_read_config_dword(adapter->pdev,
2298                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2299                 pci_read_config_dword(adapter->pdev,
2300                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2301                 pci_read_config_dword(adapter->pdev,
2302                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2303                 pci_read_config_dword(adapter->pdev,
2304                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2305
2306                 ue_lo = (ue_lo & ~ue_lo_mask);
2307                 ue_hi = (ue_hi & ~ue_hi_mask);
2308         }
2309
2310         /* On certain platforms BE hardware can indicate spurious UEs.
2311          * In case of a real UE the h/w will stop working on its own anyway,
2312          * so hw_error is deliberately not set on UE detection alone.
2313          */
2314         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2315                 adapter->hw_error = true;
2316                 dev_err(&adapter->pdev->dev,
2317                         "Error detected in the card\n");
2318         }
2319
2320         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2321                 dev_err(&adapter->pdev->dev,
2322                         "ERR: sliport status 0x%x\n", sliport_status);
2323                 dev_err(&adapter->pdev->dev,
2324                         "ERR: sliport error1 0x%x\n", sliport_err1);
2325                 dev_err(&adapter->pdev->dev,
2326                         "ERR: sliport error2 0x%x\n", sliport_err2);
2327         }
2328
2329         if (ue_lo) {
2330                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2331                         if (ue_lo & 1)
2332                                 dev_err(&adapter->pdev->dev,
2333                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2334                 }
2335         }
2336
2337         if (ue_hi) {
2338                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2339                         if (ue_hi & 1)
2340                                 dev_err(&adapter->pdev->dev,
2341                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2342                 }
2343         }
2344
2345 }
2346
2347 static void be_msix_disable(struct be_adapter *adapter)
2348 {
2349         if (msix_enabled(adapter)) {
2350                 pci_disable_msix(adapter->pdev);
2351                 adapter->num_msix_vec = 0;
2352         }
2353 }
2354
2355 static uint be_num_rss_want(struct be_adapter *adapter)
2356 {
2357         u32 num = 0;
2358
2359         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2360             (lancer_chip(adapter) ||
2361              (!sriov_want(adapter) && be_physfn(adapter)))) {
2362                 num = adapter->max_rss_queues;
2363                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2364         }
2365         return num;
2366 }
2367
2368 static void be_msix_enable(struct be_adapter *adapter)
2369 {
2370 #define BE_MIN_MSIX_VECTORS             1
2371         int i, status, num_vec, num_roce_vec = 0;
2372         struct device *dev = &adapter->pdev->dev;
2373
2374         /* If RSS queues are not used, need a vec for default RX Q */
2375         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2376         if (be_roce_supported(adapter)) {
2377                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2378                                         (num_online_cpus() + 1));
2379                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2380                 num_vec += num_roce_vec;
2381                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2382         }
2383         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2384
2385         for (i = 0; i < num_vec; i++)
2386                 adapter->msix_entries[i].entry = i;
2387
2388         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2389         if (status == 0) {
2390                 goto done;
2391         } else if (status >= BE_MIN_MSIX_VECTORS) {
2392                 num_vec = status;
2393                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2394                                 num_vec) == 0)
2395                         goto done;
2396         }
2397
2398         dev_warn(dev, "MSI-X enable failed\n");
2399         return;
2400 done:
2401         if (be_roce_supported(adapter)) {
2402                 if (num_vec > num_roce_vec) {
2403                         adapter->num_msix_vec = num_vec - num_roce_vec;
2404                         adapter->num_msix_roce_vec =
2405                                 num_vec - adapter->num_msix_vec;
2406                 } else {
2407                         adapter->num_msix_vec = num_vec;
2408                         adapter->num_msix_roce_vec = 0;
2409                 }
2410         } else
2411                 adapter->num_msix_vec = num_vec;
2412         dev_info(dev, "enabled %d MSI-X vector(s)\n", adapter->num_msix_vec);
2413         return;
2414 }
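
/* Editor's note: in this kernel era pci_enable_msix() returns 0 on
 * success or, on failure, a positive count of the vectors that could
 * actually be allocated; hence the single retry above with the smaller
 * count before falling back to INTx.
 */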
2415
2416 static inline int be_msix_vec_get(struct be_adapter *adapter,
2417                                 struct be_eq_obj *eqo)
2418 {
2419         return adapter->msix_entries[eqo->idx].vector;
2420 }
2421
2422 static int be_msix_register(struct be_adapter *adapter)
2423 {
2424         struct net_device *netdev = adapter->netdev;
2425         struct be_eq_obj *eqo;
2426         int status, i, vec;
2427
2428         for_all_evt_queues(adapter, eqo, i) {
2429                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2430                 vec = be_msix_vec_get(adapter, eqo);
2431                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2432                 if (status)
2433                         goto err_msix;
2434         }
2435
2436         return 0;
2437 err_msix:
2438         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2439                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2440         dev_warn(&adapter->pdev->dev, "MSI-X request IRQ failed - err %d\n",
2441                 status);
2442         be_msix_disable(adapter);
2443         return status;
2444 }
2445
2446 static int be_irq_register(struct be_adapter *adapter)
2447 {
2448         struct net_device *netdev = adapter->netdev;
2449         int status;
2450
2451         if (msix_enabled(adapter)) {
2452                 status = be_msix_register(adapter);
2453                 if (status == 0)
2454                         goto done;
2455                 /* INTx is not supported for VF */
2456                 if (!be_physfn(adapter))
2457                         return status;
2458         }
2459
2460         /* INTx: only the first EQ is used */
2461         netdev->irq = adapter->pdev->irq;
2462         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2463                              &adapter->eq_obj[0]);
2464         if (status) {
2465                 dev_err(&adapter->pdev->dev,
2466                         "INTx request IRQ failed - err %d\n", status);
2467                 return status;
2468         }
2469 done:
2470         adapter->isr_registered = true;
2471         return 0;
2472 }
2473
2474 static void be_irq_unregister(struct be_adapter *adapter)
2475 {
2476         struct net_device *netdev = adapter->netdev;
2477         struct be_eq_obj *eqo;
2478         int i;
2479
2480         if (!adapter->isr_registered)
2481                 return;
2482
2483         /* INTx */
2484         if (!msix_enabled(adapter)) {
2485                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2486                 goto done;
2487         }
2488
2489         /* MSIx */
2490         for_all_evt_queues(adapter, eqo, i)
2491                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2492
2493 done:
2494         adapter->isr_registered = false;
2495 }
2496
2497 static void be_rx_qs_destroy(struct be_adapter *adapter)
2498 {
2499         struct be_queue_info *q;
2500         struct be_rx_obj *rxo;
2501         int i;
2502
2503         for_all_rx_queues(adapter, rxo, i) {
2504                 q = &rxo->q;
2505                 if (q->created) {
2506                         be_cmd_rxq_destroy(adapter, q);
2507                         /* After the rxq is invalidated, wait for a grace time
2508                          * of 1ms for all DMA to end and the flush compl to
2509                          * arrive
2510                          */
2511                         mdelay(1);
2512                         be_rx_cq_clean(rxo);
2513                 }
2514                 be_queue_free(adapter, q);
2515         }
2516 }
2517
2518 static int be_close(struct net_device *netdev)
2519 {
2520         struct be_adapter *adapter = netdev_priv(netdev);
2521         struct be_eq_obj *eqo;
2522         int i;
2523
2524         be_roce_dev_close(adapter);
2525
2526         if (!lancer_chip(adapter))
2527                 be_intr_set(adapter, false);
2528
2529         for_all_evt_queues(adapter, eqo, i)
2530                 napi_disable(&eqo->napi);
2531
2532         be_async_mcc_disable(adapter);
2533
2534         /* Wait for all pending tx completions to arrive so that
2535          * all tx skbs are freed.
2536          */
2537         be_tx_compl_clean(adapter);
2538
2539         be_rx_qs_destroy(adapter);
2540
2541         for_all_evt_queues(adapter, eqo, i) {
2542                 if (msix_enabled(adapter))
2543                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2544                 else
2545                         synchronize_irq(netdev->irq);
2546                 be_eq_clean(eqo);
2547         }
2548
2549         be_irq_unregister(adapter);
2550
2551         return 0;
2552 }
2553
2554 static int be_rx_qs_create(struct be_adapter *adapter)
2555 {
2556         struct be_rx_obj *rxo;
2557         int rc, i, j;
2558         u8 rsstable[128];
2559
2560         for_all_rx_queues(adapter, rxo, i) {
2561                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2562                                     sizeof(struct be_eth_rx_d));
2563                 if (rc)
2564                         return rc;
2565         }
2566
2567         /* The FW would like the default RXQ to be created first */
2568         rxo = default_rxo(adapter);
2569         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2570                                adapter->if_handle, false, &rxo->rss_id);
2571         if (rc)
2572                 return rc;
2573
2574         for_all_rss_queues(adapter, rxo, i) {
2575                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2576                                        rx_frag_size, adapter->if_handle,
2577                                        true, &rxo->rss_id);
2578                 if (rc)
2579                         return rc;
2580         }
2581
2582         if (be_multi_rxq(adapter)) {
2583                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2584                         for_all_rss_queues(adapter, rxo, i) {
2585                                 if ((j + i) >= 128)
2586                                         break;
2587                                 rsstable[j + i] = rxo->rss_id;
2588                         }
2589                 }
2590                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2591                 if (rc)
2592                         return rc;
2593         }
2594
2595         /* First time posting */
2596         for_all_rx_queues(adapter, rxo, i)
2597                 be_post_rx_frags(rxo, GFP_KERNEL);
2598         return 0;
2599 }
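
/* Editor's note on the RSS table fill above: the 128-entry indirection
 * table is populated round-robin with the rss_id of each RSS ring.
 * Worked example (illustrative): with num_rx_qs = 5 (4 RSS rings plus
 * the default ring) the table becomes id0, id1, id2, id3, id0, id1, ...
 * so hashed flows spread uniformly across the four RSS rings; the
 * default ring is deliberately left out of the table.
 */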
2600
2601 static int be_open(struct net_device *netdev)
2602 {
2603         struct be_adapter *adapter = netdev_priv(netdev);
2604         struct be_eq_obj *eqo;
2605         struct be_rx_obj *rxo;
2606         struct be_tx_obj *txo;
2607         u8 link_status;
2608         int status, i;
2609
2610         status = be_rx_qs_create(adapter);
2611         if (status)
2612                 goto err;
2613
2614         be_irq_register(adapter);
2615
2616         if (!lancer_chip(adapter))
2617                 be_intr_set(adapter, true);
2618
2619         for_all_rx_queues(adapter, rxo, i)
2620                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2621
2622         for_all_tx_queues(adapter, txo, i)
2623                 be_cq_notify(adapter, txo->cq.id, true, 0);
2624
2625         be_async_mcc_enable(adapter);
2626
2627         for_all_evt_queues(adapter, eqo, i) {
2628                 napi_enable(&eqo->napi);
2629                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2630         }
2631
2632         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2633         if (!status)
2634                 be_link_status_update(adapter, link_status);
2635
2636         be_roce_dev_open(adapter);
2637         return 0;
2638 err:
2639         be_close(adapter->netdev);
2640         return -EIO;
2641 }
2642
2643 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2644 {
2645         struct be_dma_mem cmd;
2646         int status = 0;
2647         u8 mac[ETH_ALEN];
2648
2649         memset(mac, 0, ETH_ALEN);
2650
2651         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2652         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2653                                     GFP_KERNEL);
2654         if (cmd.va == NULL)
2655                 return -1;
2656         memset(cmd.va, 0, cmd.size);
2657
2658         if (enable) {
2659                 status = pci_write_config_dword(adapter->pdev,
2660                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2661                 if (status) {
2662                         dev_err(&adapter->pdev->dev,
2663                                 "Could not enable Wake-on-lan\n");
2664                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2665                                           cmd.dma);
2666                         return status;
2667                 }
2668                 status = be_cmd_enable_magic_wol(adapter,
2669                                 adapter->netdev->dev_addr, &cmd);
2670                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2671                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2672         } else {
2673                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2674                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2675                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2676         }
2677
2678         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2679         return status;
2680 }
2681
2682 /*
2683  * Generate a seed MAC address from the PF MAC address using jhash.
2684  * MAC addresses for VFs are assigned incrementally starting from the seed.
2685  * These addresses are programmed in the ASIC by the PF and the VF driver
2686  * queries for the MAC address during its probe.
2687  */
2688 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2689 {
2690         u32 vf;
2691         int status = 0;
2692         u8 mac[ETH_ALEN];
2693         struct be_vf_cfg *vf_cfg;
2694
2695         be_vf_eth_addr_generate(adapter, mac);
2696
2697         for_all_vfs(adapter, vf_cfg, vf) {
2698                 if (lancer_chip(adapter)) {
2699                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2700                 } else {
2701                         status = be_cmd_pmac_add(adapter, mac,
2702                                                  vf_cfg->if_handle,
2703                                                  &vf_cfg->pmac_id, vf + 1);
2704                 }
2705
2706                 if (status)
2707                         dev_err(&adapter->pdev->dev,
2708                         "Mac address assignment failed for VF %d\n", vf);
2709                 else
2710                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2711
2712                 mac[5] += 1;
2713         }
2714         return status;
2715 }
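
/* Editor's note: only the last octet is incremented (mac[5] += 1), so
 * the scheme assumes fewer than 256 VFs; the device limit
 * (adapter->dev_num_vfs) keeps actual VF counts far below that, so the
 * simple increment is safe here.
 */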
2716
2717 static int be_vfs_mac_query(struct be_adapter *adapter)
2718 {
2719         int status, vf;
2720         u8 mac[ETH_ALEN];
2721         struct be_vf_cfg *vf_cfg;
2722         bool active;
2723
2724         for_all_vfs(adapter, vf_cfg, vf) {
2725                 be_cmd_get_mac_from_list(adapter, mac, &active,
2726                                          &vf_cfg->pmac_id, 0);
2727
2728                 status = be_cmd_mac_addr_query(adapter, mac, false,
2729                                                vf_cfg->if_handle, 0);
2730                 if (status)
2731                         return status;
2732                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2733         }
2734         return 0;
2735 }
2736
2737 static void be_vf_clear(struct be_adapter *adapter)
2738 {
2739         struct be_vf_cfg *vf_cfg;
2740         u32 vf;
2741
2742         if (be_find_vfs(adapter, ASSIGNED)) {
2743                 dev_warn(&adapter->pdev->dev,
2744                          "VFs are assigned to VMs: not disabling VFs\n");
2745                 goto done;
2746         }
2747
2748         for_all_vfs(adapter, vf_cfg, vf) {
2749                 if (lancer_chip(adapter))
2750                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2751                 else
2752                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2753                                         vf_cfg->pmac_id, vf + 1);
2754
2755                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2756         }
2757         pci_disable_sriov(adapter->pdev);
2758 done:
2759         kfree(adapter->vf_cfg);
2760         adapter->num_vfs = 0;
2761 }
2762
2763 static int be_clear(struct be_adapter *adapter)
2764 {
2765         int i = 1;
2766
2767         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2768                 cancel_delayed_work_sync(&adapter->work);
2769                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2770         }
2771
2772         if (sriov_enabled(adapter))
2773                 be_vf_clear(adapter);
2774
2775         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2776                 be_cmd_pmac_del(adapter, adapter->if_handle,
2777                         adapter->pmac_id[i], 0);
2778
2779         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2780
2781         be_mcc_queues_destroy(adapter);
2782         be_rx_cqs_destroy(adapter);
2783         be_tx_queues_destroy(adapter);
2784         be_evt_queues_destroy(adapter);
2785
2786         kfree(adapter->pmac_id);
2787         adapter->pmac_id = NULL;
2788
2789         be_msix_disable(adapter);
2790         return 0;
2791 }
2792
2793 static int be_vfs_if_create(struct be_adapter *adapter)
2794 {
2795         struct be_vf_cfg *vf_cfg;
2796         u32 cap_flags, en_flags, vf;
2797         int status = 0;         /* may stay 0 if there are no VFs to iterate */
2798
2799         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2800                     BE_IF_FLAGS_MULTICAST;
2801
2802         for_all_vfs(adapter, vf_cfg, vf) {
2803                 if (!BE3_chip(adapter))
2804                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2805
2806                 /* If a FW profile exists, then cap_flags are updated */
2807                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2808                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2809                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2810                                           &vf_cfg->if_handle, vf + 1);
2811                 if (status)
2812                         goto err;
2813         }
2814 err:
2815         return status;
2816 }
2817
2818 static int be_vf_setup_init(struct be_adapter *adapter)
2819 {
2820         struct be_vf_cfg *vf_cfg;
2821         int vf;
2822
2823         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2824                                   GFP_KERNEL);
2825         if (!adapter->vf_cfg)
2826                 return -ENOMEM;
2827
2828         for_all_vfs(adapter, vf_cfg, vf) {
2829                 vf_cfg->if_handle = -1;
2830                 vf_cfg->pmac_id = -1;
2831         }
2832         return 0;
2833 }
2834
2835 static int be_vf_setup(struct be_adapter *adapter)
2836 {
2837         struct be_vf_cfg *vf_cfg;
2838         u16 def_vlan, lnk_speed;
2839         int status, old_vfs, vf;
2840         struct device *dev = &adapter->pdev->dev;
2841
2842         old_vfs = be_find_vfs(adapter, ENABLED);
2843         if (old_vfs) {
2844                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2845                 if (old_vfs != num_vfs)
2846                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2847                 adapter->num_vfs = old_vfs;
2848         } else {
2849                 if (num_vfs > adapter->dev_num_vfs)
2850                         dev_info(dev, "Device supports %d VFs, not %d\n",
2851                                  adapter->dev_num_vfs, num_vfs);
2852                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2853
2854                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2855                 if (status) {
2856                         dev_err(dev, "SRIOV enable failed\n");
2857                         adapter->num_vfs = 0;
2858                         return 0;
2859                 }
2860         }
2861
2862         status = be_vf_setup_init(adapter);
2863         if (status)
2864                 goto err;
2865
2866         if (old_vfs) {
2867                 for_all_vfs(adapter, vf_cfg, vf) {
2868                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2869                         if (status)
2870                                 goto err;
2871                 }
2872         } else {
2873                 status = be_vfs_if_create(adapter);
2874                 if (status)
2875                         goto err;
2876         }
2877
2878         if (old_vfs) {
2879                 status = be_vfs_mac_query(adapter);
2880                 if (status)
2881                         goto err;
2882         } else {
2883                 status = be_vf_eth_addr_config(adapter);
2884                 if (status)
2885                         goto err;
2886         }
2887
2888         for_all_vfs(adapter, vf_cfg, vf) {
2889                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2890                  * Allow full available bandwidth
2891                  */
2892                 if (BE3_chip(adapter) && !old_vfs)
2893                         be_cmd_set_qos(adapter, 1000, vf+1);
2894
2895                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2896                                                   NULL, vf + 1);
2897                 if (!status)
2898                         vf_cfg->tx_rate = lnk_speed;
2899
2900                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2901                                                vf + 1, vf_cfg->if_handle);
2902                 if (status)
2903                         goto err;
2904                 vf_cfg->def_vid = def_vlan;
2905
2906                 be_cmd_enable_vf(adapter, vf + 1);
2907         }
2908         return 0;
2909 err:
2910         dev_err(dev, "VF setup failed\n");
2911         be_vf_clear(adapter);
2912         return status;
2913 }
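
/* Editorial sketch (not driver code): the SR-IOV bring-up pattern used by
 * be_vf_setup() above -- adopt VFs that a previous driver instance left
 * enabled, otherwise clamp the requested count to what the device supports
 * and enable that many.  'supported' stands in for the dev_num_vfs value
 * cached by be_get_resources(); pci_num_vf() and pci_enable_sriov() are
 * the real PCI core APIs.
 */
static int example_sriov_bringup(struct pci_dev *pdev, u16 requested,
                                 u16 supported)
{
        int err, enabled = pci_num_vf(pdev);

        if (enabled)
                return enabled;         /* adopt already-enabled VFs as-is */

        requested = min_t(u16, requested, supported);
        err = pci_enable_sriov(pdev, requested);
        return err ? err : requested;
}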
2914
2915 static void be_setup_init(struct be_adapter *adapter)
2916 {
2917         adapter->vlan_prio_bmap = 0xff;
2918         adapter->phy.link_speed = -1;
2919         adapter->if_handle = -1;
2920         adapter->be3_native = false;
2921         adapter->promiscuous = false;
2922         if (be_physfn(adapter))
2923                 adapter->cmd_privileges = MAX_PRIVILEGES;
2924         else
2925                 adapter->cmd_privileges = MIN_PRIVILEGES;
2926 }
2927
2928 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2929                            bool *active_mac, u32 *pmac_id)
2930 {
2931         int status = 0;
2932
2933         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2934                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2935                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2936                         *active_mac = true;
2937                 else
2938                         *active_mac = false;
2939
2940                 return status;
2941         }
2942
2943         if (lancer_chip(adapter)) {
2944                 status = be_cmd_get_mac_from_list(adapter, mac,
2945                                                   active_mac, pmac_id, 0);
2946                 if (*active_mac) {
2947                         status = be_cmd_mac_addr_query(adapter, mac, false,
2948                                                        if_handle, *pmac_id);
2949                 }
2950         } else if (be_physfn(adapter)) {
2951                 /* For a BE3 PF, get the permanent MAC */
2952                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2953                 *active_mac = false;
2954         } else {
2955                 /* For a BE3 VF, get the soft MAC assigned by the PF */
2956                 status = be_cmd_mac_addr_query(adapter, mac, false,
2957                                                if_handle, 0);
2958                 *active_mac = true;
2959         }
2960         return status;
2961 }
2962
2963 static void be_get_resources(struct be_adapter *adapter)
2964 {
2965         u16 dev_num_vfs;
2966         int pos, status;
2967         bool profile_present = false;
2968
2969         if (!BEx_chip(adapter)) {
2970                 status = be_cmd_get_func_config(adapter);
2971                 if (!status)
2972                         profile_present = true;
2973         }
2974
2975         if (profile_present) {
2976                 /* Sanity fixes for Lancer */
2977                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2978                                               BE_UC_PMAC_COUNT);
2979                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2980                                            BE_NUM_VLANS_SUPPORTED);
2981                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2982                                                BE_MAX_MC);
2983                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2984                                                MAX_TX_QS);
2985                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2986                                                 BE3_MAX_RSS_QS);
2987                 adapter->max_event_queues = min_t(u16,
2988                                                   adapter->max_event_queues,
2989                                                   BE3_MAX_RSS_QS);
2990
2991                 if (adapter->max_rss_queues &&
2992                     adapter->max_rss_queues == adapter->max_rx_queues)
2993                         adapter->max_rss_queues -= 1;
2994
2995                 if (adapter->max_event_queues < adapter->max_rss_queues)
2996                         adapter->max_rss_queues = adapter->max_event_queues;
2997
2998         } else {
2999                 if (be_physfn(adapter))
3000                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3001                 else
3002                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3003
3004                 if (adapter->function_mode & FLEX10_MODE)
3005                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3006                 else
3007                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3008
3009                 adapter->max_mcast_mac = BE_MAX_MC;
3010                 adapter->max_tx_queues = MAX_TX_QS;
3011                 adapter->max_rss_queues = (adapter->be3_native) ?
3012                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3013                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3014
3015                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3016                                         BE_IF_FLAGS_BROADCAST |
3017                                         BE_IF_FLAGS_MULTICAST |
3018                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3019                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3020                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3021                                         BE_IF_FLAGS_PROMISCUOUS;
3022
3023                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3024                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3025         }
3026
3027         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3028         if (pos) {
3029                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3030                                      &dev_num_vfs);
3031                 if (BE3_chip(adapter))
3032                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3033                 adapter->dev_num_vfs = dev_num_vfs;
3034         }
3035 }
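
/* Editorial sketch: the tail of be_get_resources() reads the TotalVFs
 * field straight out of the SR-IOV extended capability in PCI config
 * space.  The same lookup in isolation (returns 0 when the device has no
 * SR-IOV capability):
 */
static u16 example_read_total_vfs(struct pci_dev *pdev)
{
        u16 total_vfs = 0;
        int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);

        if (pos)
                pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
                                     &total_vfs);
        return total_vfs;
}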
3036
3037 /* Routine to query per-function resource limits */
3038 static int be_get_config(struct be_adapter *adapter)
3039 {
3040         int status;
3041
3042         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3043                                      &adapter->function_mode,
3044                                      &adapter->function_caps);
3045         if (status)
3046                 goto err;
3047
3048         be_get_resources(adapter);
3049
3050         /* The primary MAC needs one pmac entry */
3051         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3052                                    sizeof(u32), GFP_KERNEL);
3053         if (!adapter->pmac_id) {
3054                 status = -ENOMEM;
3055                 goto err;
3056         }
3057
3058 err:
3059         return status;
3060 }
3061
3062 static int be_setup(struct be_adapter *adapter)
3063 {
3064         struct device *dev = &adapter->pdev->dev;
3065         u32 en_flags;
3066         u32 tx_fc, rx_fc;
3067         int status;
3068         u8 mac[ETH_ALEN];
3069         bool active_mac;
3070
3071         be_setup_init(adapter);
3072
3073         if (!lancer_chip(adapter))
3074                 be_cmd_req_native_mode(adapter);
3075
3076         status = be_get_config(adapter);
3077         if (status)
3078                 goto err;
3079
3080         be_msix_enable(adapter);
3081
3082         status = be_evt_queues_create(adapter);
3083         if (status)
3084                 goto err;
3085
3086         status = be_tx_cqs_create(adapter);
3087         if (status)
3088                 goto err;
3089
3090         status = be_rx_cqs_create(adapter);
3091         if (status)
3092                 goto err;
3093
3094         status = be_mcc_queues_create(adapter);
3095         if (status)
3096                 goto err;
3097
3098         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3099         /* In UMC mode the FW does not report the right privileges.
3100          * Override them with privileges equivalent to a PF's.
3101          */
3102         if (be_is_mc(adapter))
3103                 adapter->cmd_privileges = MAX_PRIVILEGES;
3104
3105         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3106                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3107
3108         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3109                 en_flags |= BE_IF_FLAGS_RSS;
3110
3111         en_flags &= adapter->if_cap_flags;
3112
3113         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3114                                   &adapter->if_handle, 0);
3115         if (status != 0)
3116                 goto err;
3117
3118         memset(mac, 0, ETH_ALEN);
3119         active_mac = false;
3120         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3121                                  &active_mac, &adapter->pmac_id[0]);
3122         if (status != 0)
3123                 goto err;
3124
3125         if (!active_mac) {
3126                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3127                                          &adapter->pmac_id[0], 0);
3128                 if (status != 0)
3129                         goto err;
3130         }
3131
3132         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3133                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3134                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3135         }
3136
3137         status = be_tx_qs_create(adapter);
3138         if (status)
3139                 goto err;
3140
3141         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3142
3143         if (adapter->vlans_added)
3144                 be_vid_config(adapter);
3145
3146         be_set_rx_mode(adapter->netdev);
3147
3148         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3149
3150         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3151                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3152                                         adapter->rx_fc);
3153
3154         if (be_physfn(adapter) && num_vfs) {
3155                 if (adapter->dev_num_vfs)
3156                         be_vf_setup(adapter);
3157                 else
3158                         dev_warn(dev, "device doesn't support SRIOV\n");
3159         }
3160
3161         status = be_cmd_get_phy_info(adapter);
3162         if (!status && be_pause_supported(adapter))
3163                 adapter->phy.fc_autoneg = 1;
3164
3165         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3166         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3167         return 0;
3168 err:
3169         be_clear(adapter);
3170         return status;
3171 }
3172
3173 #ifdef CONFIG_NET_POLL_CONTROLLER
3174 static void be_netpoll(struct net_device *netdev)
3175 {
3176         struct be_adapter *adapter = netdev_priv(netdev);
3177         struct be_eq_obj *eqo;
3178         int i;
3179
3180         for_all_evt_queues(adapter, eqo, i) {
3181                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3182                 napi_schedule(&eqo->napi);
3183         }
3186 }
3187 #endif
3188
3189 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3190 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3191
3192 static bool be_flash_redboot(struct be_adapter *adapter,
3193                         const u8 *p, u32 img_start, int image_size,
3194                         int hdr_size)
3195 {
3196         u32 crc_offset;
3197         u8 flashed_crc[4];
3198         int status;
3199
3200         crc_offset = hdr_size + img_start + image_size - 4;
3201
3202         p += crc_offset;
3203
3204         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3205                         (image_size - 4));
3206         if (status) {
3207                 dev_err(&adapter->pdev->dev,
3208                 "could not get crc from flash, not flashing redboot\n");
3209                 return false;
3210         }
3211
3212         /* Update redboot only if the CRC does not match */
3213         if (!memcmp(flashed_crc, p, 4))
3214                 return false;
3215         else
3216                 return true;
3217 }
3218
3219 static bool phy_flashing_required(struct be_adapter *adapter)
3220 {
3221         return (adapter->phy.phy_type == TN_8022 &&
3222                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3223 }
3224
3225 static bool is_comp_in_ufi(struct be_adapter *adapter,
3226                            struct flash_section_info *fsec, int type)
3227 {
3228         int i = 0, img_type = 0;
3229         struct flash_section_info_g2 *fsec_g2 = NULL;
3230
3231         if (BE2_chip(adapter))
3232                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3233
3234         for (i = 0; i < MAX_FLASH_COMP; i++) {
3235                 if (fsec_g2)
3236                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3237                 else
3238                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3239
3240                 if (img_type == type)
3241                         return true;
3242         }
3243         return false;
3245 }
3246
3247 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3248                                          int header_size,
3249                                          const struct firmware *fw)
3250 {
3251         struct flash_section_info *fsec = NULL;
3252         const u8 *p = fw->data;
3253
3254         p += header_size;
3255         while (p < (fw->data + fw->size)) {
3256                 fsec = (struct flash_section_info *)p;
3257                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3258                         return fsec;
3259                 p += 32;
3260         }
3261         return NULL;
3262 }
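
/* Editorial sketch: get_fsec_info() is a cookie scan -- walk the image in
 * fixed 32-byte strides and stop at the first offset whose bytes match the
 * flash-directory cookie.  The same idea, with an explicit bound on the
 * compare so a truncated image cannot be over-read:
 */
static const u8 *example_find_cookie(const u8 *buf, size_t len,
                                     const u8 *cookie, size_t cookie_len)
{
        const u8 *p;

        for (p = buf; p + cookie_len <= buf + len; p += 32)
                if (!memcmp(p, cookie, cookie_len))
                        return p;
        return NULL;    /* no section directory found */
}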
3263
3264 static int be_flash(struct be_adapter *adapter, const u8 *img,
3265                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3266 {
3267         u32 total_bytes = 0, flash_op, num_bytes = 0;
3268         int status = 0;
3269         struct be_cmd_write_flashrom *req = flash_cmd->va;
3270
3271         total_bytes = img_size;
3272         while (total_bytes) {
3273                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3274
3275                 total_bytes -= num_bytes;
3276
3277                 if (!total_bytes) {
3278                         if (optype == OPTYPE_PHY_FW)
3279                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3280                         else
3281                                 flash_op = FLASHROM_OPER_FLASH;
3282                 } else {
3283                         if (optype == OPTYPE_PHY_FW)
3284                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3285                         else
3286                                 flash_op = FLASHROM_OPER_SAVE;
3287                 }
3288
3289                 memcpy(req->data_buf, img, num_bytes);
3290                 img += num_bytes;
3291                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3292                                                 flash_op, num_bytes);
3293                 if (status) {
3294                         if (status == ILLEGAL_IOCTL_REQ &&
3295                             optype == OPTYPE_PHY_FW)
3296                                 break;
3297                         dev_err(&adapter->pdev->dev,
3298                                 "cmd to write to flash rom failed.\n");
3299                         return status;
3300                 }
3301         }
3302         return 0;
3303 }
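
/* Editorial sketch: be_flash() implements a stage-then-commit protocol --
 * every 32KB chunk except the last is sent with a SAVE opcode (staged by
 * the adapter) and the final chunk with a FLASH opcode, which burns the
 * staged data.  The shape of that loop, with write_chunk(), OP_SAVE and
 * OP_FLASH as hypothetical stand-ins for the flashrom command:
 */
static int example_flash_image(const u8 *img, u32 len)
{
        while (len) {
                u32 n = min_t(u32, 32 * 1024, len);
                bool last_chunk = (len == n);
                int err = write_chunk(img, n,
                                      last_chunk ? OP_FLASH : OP_SAVE);

                if (err)
                        return err;
                img += n;
                len -= n;
        }
        return 0;
}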
3304
3305 /* For BE2 and BE3 */
3306 static int be_flash_BEx(struct be_adapter *adapter,
3307                          const struct firmware *fw,
3308                          struct be_dma_mem *flash_cmd,
3309                          int num_of_images)
3311 {
3312         int status = 0, i, filehdr_size = 0;
3313         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3314         const u8 *p = fw->data;
3315         const struct flash_comp *pflashcomp;
3316         int num_comp, redboot;
3317         struct flash_section_info *fsec = NULL;
3318
3319         struct flash_comp gen3_flash_types[] = {
3320                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3321                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3322                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3323                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3324                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3325                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3326                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3327                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3328                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3329                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3330                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3331                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3332                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3333                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3334                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3335                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3336                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3337                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3338                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3339                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3340         };
3341
3342         struct flash_comp gen2_flash_types[] = {
3343                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3344                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3345                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3346                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3347                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3348                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3349                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3350                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3351                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3352                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3353                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3354                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3355                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3356                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3357                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3358                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3359         };
3360
3361         if (BE3_chip(adapter)) {
3362                 pflashcomp = gen3_flash_types;
3363                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3364                 num_comp = ARRAY_SIZE(gen3_flash_types);
3365         } else {
3366                 pflashcomp = gen2_flash_types;
3367                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3368                 num_comp = ARRAY_SIZE(gen2_flash_types);
3369         }
3370
3371         /* Get flash section info */
3372         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3373         if (!fsec) {
3374                 dev_err(&adapter->pdev->dev,
3375                         "Invalid cookie. UFI corrupted?\n");
3376                 return -1;
3377         }
3378         for (i = 0; i < num_comp; i++) {
3379                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3380                         continue;
3381
3382                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3383                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3384                         continue;
3385
3386                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3387                     !phy_flashing_required(adapter))
3388                         continue;
3389
3390                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3391                         redboot = be_flash_redboot(adapter, fw->data,
3392                                 pflashcomp[i].offset, pflashcomp[i].size,
3393                                 filehdr_size + img_hdrs_size);
3394                         if (!redboot)
3395                                 continue;
3396                 }
3397
3398                 p = fw->data;
3399                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3400                 if (p + pflashcomp[i].size > fw->data + fw->size)
3401                         return -1;
3402
3403                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3404                                         pflashcomp[i].size);
3405                 if (status) {
3406                         dev_err(&adapter->pdev->dev,
3407                                 "Flashing section type %d failed.\n",
3408                                 pflashcomp[i].img_type);
3409                         return status;
3410                 }
3411         }
3412         return 0;
3413 }
3414
3415 static int be_flash_skyhawk(struct be_adapter *adapter,
3416                 const struct firmware *fw,
3417                 struct be_dma_mem *flash_cmd, int num_of_images)
3418 {
3419         int status = 0, i, filehdr_size = 0;
3420         int img_offset, img_size, img_optype, redboot;
3421         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3422         const u8 *p = fw->data;
3423         struct flash_section_info *fsec = NULL;
3424
3425         filehdr_size = sizeof(struct flash_file_hdr_g3);
3426         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3427         if (!fsec) {
3428                 dev_err(&adapter->pdev->dev,
3429                         "Invalid cookie. UFI corrupted?\n");
3430                 return -1;
3431         }
3432
3433         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3434                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3435                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3436
3437                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3438                 case IMAGE_FIRMWARE_iSCSI:
3439                         img_optype = OPTYPE_ISCSI_ACTIVE;
3440                         break;
3441                 case IMAGE_BOOT_CODE:
3442                         img_optype = OPTYPE_REDBOOT;
3443                         break;
3444                 case IMAGE_OPTION_ROM_ISCSI:
3445                         img_optype = OPTYPE_BIOS;
3446                         break;
3447                 case IMAGE_OPTION_ROM_PXE:
3448                         img_optype = OPTYPE_PXE_BIOS;
3449                         break;
3450                 case IMAGE_OPTION_ROM_FCoE:
3451                         img_optype = OPTYPE_FCOE_BIOS;
3452                         break;
3453                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3454                         img_optype = OPTYPE_ISCSI_BACKUP;
3455                         break;
3456                 case IMAGE_NCSI:
3457                         img_optype = OPTYPE_NCSI_FW;
3458                         break;
3459                 default:
3460                         continue;
3461                 }
3462
3463                 if (img_optype == OPTYPE_REDBOOT) {
3464                         redboot = be_flash_redboot(adapter, fw->data,
3465                                         img_offset, img_size,
3466                                         filehdr_size + img_hdrs_size);
3467                         if (!redboot)
3468                                 continue;
3469                 }
3470
3471                 p = fw->data;
3472                 p += filehdr_size + img_offset + img_hdrs_size;
3473                 if (p + img_size > fw->data + fw->size)
3474                         return -1;
3475
3476                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3477                 if (status) {
3478                         dev_err(&adapter->pdev->dev,
3479                                 "Flashing section type %d failed.\n",
3480                                 le32_to_cpu(fsec->fsec_entry[i].type));
3481                         return status;
3482                 }
3483         }
3484         return 0;
3485 }
3486
3487 static int lancer_wait_idle(struct be_adapter *adapter)
3488 {
3489 #define SLIPORT_IDLE_TIMEOUT 30        /* seconds */
3490         u32 reg_val;
3491         int status = 0, i;
3492
3493         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3494                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3495                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3496                         break;
3497
3498                 ssleep(1);
3499         }
3500
3501         if (i == SLIPORT_IDLE_TIMEOUT)
3502                 status = -1;
3503
3504         return status;
3505 }
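
/* Editorial sketch: lancer_wait_idle() is the classic bounded-poll idiom --
 * sample a status register once a second and give up after a fixed number
 * of tries.  Generically, with read_status() as a hypothetical register
 * accessor (note a real errno is friendlier than the -1 used above):
 */
static int example_poll_until_clear(u32 mask, int timeout_s)
{
        int i;

        for (i = 0; i < timeout_s; i++) {
                if (!(read_status() & mask))    /* hypothetical accessor */
                        return 0;
                ssleep(1);
        }
        return -ETIMEDOUT;
}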
3506
3507 static int lancer_fw_reset(struct be_adapter *adapter)
3508 {
3509         int status = 0;
3510
3511         status = lancer_wait_idle(adapter);
3512         if (status)
3513                 return status;
3514
3515         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3516                   PHYSDEV_CONTROL_OFFSET);
3517
3518         return status;
3519 }
3520
3521 static int lancer_fw_download(struct be_adapter *adapter,
3522                                 const struct firmware *fw)
3523 {
3524 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3525 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3526         struct be_dma_mem flash_cmd;
3527         const u8 *data_ptr = NULL;
3528         u8 *dest_image_ptr = NULL;
3529         size_t image_size = 0;
3530         u32 chunk_size = 0;
3531         u32 data_written = 0;
3532         u32 offset = 0;
3533         int status = 0;
3534         u8 add_status = 0;
3535         u8 change_status;
3536
3537         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3538                 dev_err(&adapter->pdev->dev,
3539                         "FW image not properly aligned: length must be 4-byte aligned\n");
3541                 status = -EINVAL;
3542                 goto lancer_fw_exit;
3543         }
3544
3545         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3546                                 + LANCER_FW_DOWNLOAD_CHUNK;
3547         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3548                                                 &flash_cmd.dma, GFP_KERNEL);
3549         if (!flash_cmd.va) {
3550                 status = -ENOMEM;
3551                 dev_err(&adapter->pdev->dev,
3552                         "Memory allocation failure while flashing\n");
3553                 goto lancer_fw_exit;
3554         }
3555
3556         dest_image_ptr = flash_cmd.va +
3557                                 sizeof(struct lancer_cmd_req_write_object);
3558         image_size = fw->size;
3559         data_ptr = fw->data;
3560
3561         while (image_size) {
3562                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3563
3564                 /* Copy the image chunk content. */
3565                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3566
3567                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3568                                                  chunk_size, offset,
3569                                                  LANCER_FW_DOWNLOAD_LOCATION,
3570                                                  &data_written, &change_status,
3571                                                  &add_status);
3572                 if (status)
3573                         break;
3574
3575                 offset += data_written;
3576                 data_ptr += data_written;
3577                 image_size -= data_written;
3578         }
3579
3580         if (!status) {
3581                 /* Commit the FW written */
3582                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3583                                                  0, offset,
3584                                                  LANCER_FW_DOWNLOAD_LOCATION,
3585                                                  &data_written, &change_status,
3586                                                  &add_status);
3587         }
3588
3589         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3590                                 flash_cmd.dma);
3591         if (status) {
3592                 dev_err(&adapter->pdev->dev,
3593                         "Firmware load error: status 0x%x, additional status 0x%x\n",
3595                         status, add_status);
3596                 goto lancer_fw_exit;
3597         }
3598
3599         if (change_status == LANCER_FW_RESET_NEEDED) {
3600                 status = lancer_fw_reset(adapter);
3601                 if (status) {
3602                         dev_err(&adapter->pdev->dev,
3603                                 "Adapter busy for FW reset; new FW will not be active\n");
3605                         goto lancer_fw_exit;
3606                 }
3607         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3608                 dev_err(&adapter->pdev->dev,
3609                         "System reboot required for new FW to be active\n");
3611         }
3612
3613         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3614 lancer_fw_exit:
3615         return status;
3616 }
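
/* Editorial sketch: the download loop above streams the image in chunks,
 * advancing by however many bytes the device reports it actually consumed
 * (data_written may be less than the chunk sent), then commits with a
 * zero-length write at the final offset.  write_object() is a hypothetical
 * stand-in for lancer_cmd_write_object():
 */
static int example_stream_and_commit(const u8 *img, u32 len)
{
        u32 off = 0, written = 0;
        int err = 0;

        while (len && !err) {
                u32 chunk = min_t(u32, len, 32 * 1024);

                err = write_object(img, chunk, off, &written);
                img += written;
                off += written;
                len -= written;
        }
        if (!err)
                err = write_object(NULL, 0, off, &written);     /* commit */
        return err;
}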
3617
3618 #define UFI_TYPE2               2
3619 #define UFI_TYPE3               3
3620 #define UFI_TYPE4               4
3621 static int be_get_ufi_type(struct be_adapter *adapter,
3622                            struct flash_file_hdr_g2 *fhdr)
3623 {
3624         if (fhdr == NULL)
3625                 goto be_get_ufi_exit;
3626
3627         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3628                 return UFI_TYPE4;
3629         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3630                 return UFI_TYPE3;
3631         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3632                 return UFI_TYPE2;
3633
3634 be_get_ufi_exit:
3635         dev_err(&adapter->pdev->dev,
3636                 "UFI and Interface are not compatible for flashing\n");
3637         return -1;
3638 }
3639
3640 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3641 {
3642         struct flash_file_hdr_g2 *fhdr;
3643         struct flash_file_hdr_g3 *fhdr3;
3644         struct image_hdr *img_hdr_ptr = NULL;
3645         struct be_dma_mem flash_cmd;
3646         const u8 *p;
3647         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3648
3649         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3650         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3651                                           &flash_cmd.dma, GFP_KERNEL);
3652         if (!flash_cmd.va) {
3653                 status = -ENOMEM;
3654                 dev_err(&adapter->pdev->dev,
3655                         "Memory allocation failure while flashing\n");
3656                 goto be_fw_exit;
3657         }
3658
3659         p = fw->data;
3660         fhdr = (struct flash_file_hdr_g2 *)p;
3661
3662         ufi_type = be_get_ufi_type(adapter, fhdr);
3663
3664         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3665         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3666         for (i = 0; i < num_imgs; i++) {
3667                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3668                                 (sizeof(struct flash_file_hdr_g3) +
3669                                  i * sizeof(struct image_hdr)));
3670                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3671                         if (ufi_type == UFI_TYPE4)
3672                                 status = be_flash_skyhawk(adapter, fw,
3673                                                         &flash_cmd, num_imgs);
3674                         else if (ufi_type == UFI_TYPE3)
3675                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3676                                                       num_imgs);
3677                 }
3678         }
3679
3680         if (ufi_type == UFI_TYPE2)
3681                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3682         else if (ufi_type == -1)
3683                 status = -1;
3684
3685         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3686                           flash_cmd.dma);
3687         if (status) {
3688                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3689                 goto be_fw_exit;
3690         }
3691
3692         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3693
3694 be_fw_exit:
3695         return status;
3696 }
3697
3698 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3699 {
3700         const struct firmware *fw;
3701         int status;
3702
3703         if (!netif_running(adapter->netdev)) {
3704                 dev_err(&adapter->pdev->dev,
3705                         "Firmware load not allowed (interface is down)\n");
3706                 return -1;
3707         }
3708
3709         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3710         if (status)
3711                 goto fw_exit;
3712
3713         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3714
3715         if (lancer_chip(adapter))
3716                 status = lancer_fw_download(adapter, fw);
3717         else
3718                 status = be_fw_download(adapter, fw);
3719
3720 fw_exit:
3721         release_firmware(fw);
3722         return status;
3723 }
3724
3725 static const struct net_device_ops be_netdev_ops = {
3726         .ndo_open               = be_open,
3727         .ndo_stop               = be_close,
3728         .ndo_start_xmit         = be_xmit,
3729         .ndo_set_rx_mode        = be_set_rx_mode,
3730         .ndo_set_mac_address    = be_mac_addr_set,
3731         .ndo_change_mtu         = be_change_mtu,
3732         .ndo_get_stats64        = be_get_stats64,
3733         .ndo_validate_addr      = eth_validate_addr,
3734         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3735         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3736         .ndo_set_vf_mac         = be_set_vf_mac,
3737         .ndo_set_vf_vlan        = be_set_vf_vlan,
3738         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3739         .ndo_get_vf_config      = be_get_vf_config,
3740 #ifdef CONFIG_NET_POLL_CONTROLLER
3741         .ndo_poll_controller    = be_netpoll,
3742 #endif
3743 };
3744
3745 static void be_netdev_init(struct net_device *netdev)
3746 {
3747         struct be_adapter *adapter = netdev_priv(netdev);
3748         struct be_eq_obj *eqo;
3749         int i;
3750
3751         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3752                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3753                 NETIF_F_HW_VLAN_TX;
3754         if (be_multi_rxq(adapter))
3755                 netdev->hw_features |= NETIF_F_RXHASH;
3756
3757         netdev->features |= netdev->hw_features |
3758                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3759
3760         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3761                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3762
3763         netdev->priv_flags |= IFF_UNICAST_FLT;
3764
3765         netdev->flags |= IFF_MULTICAST;
3766
3767         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3768
3769         netdev->netdev_ops = &be_netdev_ops;
3770
3771         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3772
3773         for_all_evt_queues(adapter, eqo, i)
3774                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3775 }
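
/* Editorial note on the flag groups set above: hw_features is what the
 * user may toggle via ethtool, features is what is currently enabled, and
 * vlan_features is what stacked VLAN devices may inherit.  A sketch of a
 * capability that is supported but off by default:
 */
static void example_offload_defaults(struct net_device *netdev)
{
        netdev->hw_features |= NETIF_F_TSO;     /* togglable via ethtool */
        netdev->features &= ~NETIF_F_TSO;       /* but start disabled */
}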
3776
3777 static void be_unmap_pci_bars(struct be_adapter *adapter)
3778 {
3779         if (adapter->csr)
3780                 pci_iounmap(adapter->pdev, adapter->csr);
3781         if (adapter->db)
3782                 pci_iounmap(adapter->pdev, adapter->db);
3783 }
3784
3785 static int db_bar(struct be_adapter *adapter)
3786 {
3787         if (lancer_chip(adapter) || !be_physfn(adapter))
3788                 return 0;
3789         else
3790                 return 4;
3791 }
3792
3793 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3794 {
3795         if (skyhawk_chip(adapter)) {
3796                 adapter->roce_db.size = 4096;
3797                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3798                                                               db_bar(adapter));
3799                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3800                                                                db_bar(adapter));
3801         }
3802         return 0;
3803 }
3804
3805 static int be_map_pci_bars(struct be_adapter *adapter)
3806 {
3807         u8 __iomem *addr;
3808         u32 sli_intf;
3809
3810         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3811         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3812                                 SLI_INTF_IF_TYPE_SHIFT;
3813
3814         if (BEx_chip(adapter) && be_physfn(adapter)) {
3815                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3816                 if (adapter->csr == NULL)
3817                         return -ENOMEM;
3818         }
3819
3820         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3821         if (addr == NULL)
3822                 goto pci_map_err;
3823         adapter->db = addr;
3824
3825         be_roce_map_pci_bars(adapter);
3826         return 0;
3827
3828 pci_map_err:
3829         be_unmap_pci_bars(adapter);
3830         return -ENOMEM;
3831 }
3832
3833 static void be_ctrl_cleanup(struct be_adapter *adapter)
3834 {
3835         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3836
3837         be_unmap_pci_bars(adapter);
3838
3839         if (mem->va)
3840                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3841                                   mem->dma);
3842
3843         mem = &adapter->rx_filter;
3844         if (mem->va)
3845                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3846                                   mem->dma);
3847 }
3848
3849 static int be_ctrl_init(struct be_adapter *adapter)
3850 {
3851         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3852         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3853         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3854         u32 sli_intf;
3855         int status;
3856
3857         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3858         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3859                                  SLI_INTF_FAMILY_SHIFT;
3860         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3861
3862         status = be_map_pci_bars(adapter);
3863         if (status)
3864                 goto done;
3865
3866         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3867         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3868                                                 mbox_mem_alloc->size,
3869                                                 &mbox_mem_alloc->dma,
3870                                                 GFP_KERNEL);
3871         if (!mbox_mem_alloc->va) {
3872                 status = -ENOMEM;
3873                 goto unmap_pci_bars;
3874         }
3875         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3876         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3877         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3878         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3879
3880         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3881         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3882                                         &rx_filter->dma, GFP_KERNEL);
3883         if (rx_filter->va == NULL) {
3884                 status = -ENOMEM;
3885                 goto free_mbox;
3886         }
3887         memset(rx_filter->va, 0, rx_filter->size);
3888         mutex_init(&adapter->mbox_lock);
3889         spin_lock_init(&adapter->mcc_lock);
3890         spin_lock_init(&adapter->mcc_cq_lock);
3891
3892         init_completion(&adapter->flash_compl);
3893         pci_save_state(adapter->pdev);
3894         return 0;
3895
3896 free_mbox:
3897         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3898                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3899
3900 unmap_pci_bars:
3901         be_unmap_pci_bars(adapter);
3902
3903 done:
3904         return status;
3905 }
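
/* Editorial sketch: the mailbox setup above is the standard way to get a
 * 16-byte-aligned DMA buffer without a dma_pool -- over-allocate by the
 * alignment, then round the CPU and bus addresses up.  Both move by the
 * same amount because dma_alloc_coherent() returns memory aligned far more
 * strictly than 16 bytes:
 */
static void *example_aligned_view(void *va, dma_addr_t *dma, int align)
{
        *dma = ALIGN(*dma, align);      /* round the bus address up */
        return PTR_ALIGN(va, align);    /* round the CPU address up */
}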
3906
3907 static void be_stats_cleanup(struct be_adapter *adapter)
3908 {
3909         struct be_dma_mem *cmd = &adapter->stats_cmd;
3910
3911         if (cmd->va)
3912                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3913                                   cmd->va, cmd->dma);
3914 }
3915
3916 static int be_stats_init(struct be_adapter *adapter)
3917 {
3918         struct be_dma_mem *cmd = &adapter->stats_cmd;
3919
3920         if (lancer_chip(adapter))
3921                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3922         else if (BE2_chip(adapter))
3923                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3924         else
3925                 /* BE3 and Skyhawk */
3926                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3927
3928         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3929                                      GFP_KERNEL);
3930         if (cmd->va == NULL)
3931                 return -ENOMEM;
3932         memset(cmd->va, 0, cmd->size);
3933         return 0;
3934 }
3935
3936 static void be_remove(struct pci_dev *pdev)
3937 {
3938         struct be_adapter *adapter = pci_get_drvdata(pdev);
3939
3940         if (!adapter)
3941                 return;
3942
3943         be_roce_dev_remove(adapter);
3944
3945         cancel_delayed_work_sync(&adapter->func_recovery_work);
3946
3947         unregister_netdev(adapter->netdev);
3948
3949         be_clear(adapter);
3950
3951         /* tell fw we're done with firing cmds */
3952         be_cmd_fw_clean(adapter);
3953
3954         be_stats_cleanup(adapter);
3955
3956         be_ctrl_cleanup(adapter);
3957
3958         pci_disable_pcie_error_reporting(pdev);
3959
3960         pci_set_drvdata(pdev, NULL);
3961         pci_release_regions(pdev);
3962         pci_disable_device(pdev);
3963
3964         free_netdev(adapter->netdev);
3965 }
3966
3967 bool be_is_wol_supported(struct be_adapter *adapter)
3968 {
3969         return (adapter->wol_cap & BE_WOL_CAP) &&
3970                !be_is_wol_excluded(adapter);
3971 }
3972
3973 u32 be_get_fw_log_level(struct be_adapter *adapter)
3974 {
3975         struct be_dma_mem extfat_cmd;
3976         struct be_fat_conf_params *cfgs;
3977         int status;
3978         u32 level = 0;
3979         int j;
3980
3981         if (lancer_chip(adapter))
3982                 return 0;
3983
3984         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3985         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3986         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3987                                              &extfat_cmd.dma);
3988
3989         if (!extfat_cmd.va) {
3990                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3991                         __func__);
3992                 goto err;
3993         }
3994
3995         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3996         if (!status) {
3997                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3998                                                 sizeof(struct be_cmd_resp_hdr));
3999                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4000                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4001                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4002                 }
4003         }
4004         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4005                             extfat_cmd.dma);
4006 err:
4007         return level;
4008 }
4009
4010 static int be_get_initial_config(struct be_adapter *adapter)
4011 {
4012         int status;
4013         u32 level;
4014
4015         status = be_cmd_get_cntl_attributes(adapter);
4016         if (status)
4017                 return status;
4018
4019         status = be_cmd_get_acpi_wol_cap(adapter);
4020         if (status) {
4021                 /* In case of a failure to get WoL capabilities,
4022                  * check the exclusion list to determine WoL capability */
4023                 if (!be_is_wol_excluded(adapter))
4024                         adapter->wol_cap |= BE_WOL_CAP;
4025         }
4026
4027         if (be_is_wol_supported(adapter))
4028                 adapter->wol = true;
4029
4030         /* Must be a power of 2 or else MODULO will BUG_ON */
4031         adapter->be_get_temp_freq = 64;
4032
4033         level = be_get_fw_log_level(adapter);
4034         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4035
4036         return 0;
4037 }
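
/* Editorial note: the power-of-2 requirement above exists because MODULO()
 * in be.h reduces with a mask rather than a divide, along these lines:
 */
static inline u32 example_modulo(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));    /* only powers of two allowed */
        return val & (limit - 1);       /* == val % limit for such limits */
}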
4038
4039 static int lancer_recover_func(struct be_adapter *adapter)
4040 {
4041         int status;
4042
4043         status = lancer_test_and_set_rdy_state(adapter);
4044         if (status)
4045                 goto err;
4046
4047         if (netif_running(adapter->netdev))
4048                 be_close(adapter->netdev);
4049
4050         be_clear(adapter);
4051
4052         adapter->hw_error = false;
4053         adapter->fw_timeout = false;
4054
4055         status = be_setup(adapter);
4056         if (status)
4057                 goto err;
4058
4059         if (netif_running(adapter->netdev)) {
4060                 status = be_open(adapter->netdev);
4061                 if (status)
4062                         goto err;
4063         }
4064
4065         dev_info(&adapter->pdev->dev,
4066                  "Adapter SLIPORT recovery succeeded\n");
4067         return 0;
4068 err:
4069         if (adapter->eeh_error)
4070                 dev_err(&adapter->pdev->dev,
4071                         "Adapter SLIPORT recovery failed\n");
4072
4073         return status;
4074 }
4075
4076 static void be_func_recovery_task(struct work_struct *work)
4077 {
4078         struct be_adapter *adapter =
4079                 container_of(work, struct be_adapter,  func_recovery_work.work);
4080         int status;
4081
4082         be_detect_error(adapter);
4083
4084         if (adapter->hw_error && lancer_chip(adapter)) {
4086                 if (adapter->eeh_error)
4087                         goto out;
4088
4089                 rtnl_lock();
4090                 netif_device_detach(adapter->netdev);
4091                 rtnl_unlock();
4092
4093                 status = lancer_recover_func(adapter);
4094
4095                 if (!status)
4096                         netif_device_attach(adapter->netdev);
4097         }
4098
4099 out:
4100         schedule_delayed_work(&adapter->func_recovery_work,
4101                               msecs_to_jiffies(1000));
4102 }
4103
4104 static void be_worker(struct work_struct *work)
4105 {
4106         struct be_adapter *adapter =
4107                 container_of(work, struct be_adapter, work.work);
4108         struct be_rx_obj *rxo;
4109         struct be_eq_obj *eqo;
4110         int i;
4111
4112         /* When interrupts are not yet enabled, just reap any pending
4113          * MCC completions */
4114         if (!netif_running(adapter->netdev)) {
4115                 local_bh_disable();
4116                 be_process_mcc(adapter);
4117                 local_bh_enable();
4118                 goto reschedule;
4119         }
4120
4121         if (!adapter->stats_cmd_sent) {
4122                 if (lancer_chip(adapter))
4123                         lancer_cmd_get_pport_stats(adapter,
4124                                                 &adapter->stats_cmd);
4125                 else
4126                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4127         }
4128
4129         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4130                 be_cmd_get_die_temperature(adapter);
4131
4132         for_all_rx_queues(adapter, rxo, i) {
4133                 if (rxo->rx_post_starved) {
4134                         rxo->rx_post_starved = false;
4135                         be_post_rx_frags(rxo, GFP_KERNEL);
4136                 }
4137         }
4138
4139         for_all_evt_queues(adapter, eqo, i)
4140                 be_eqd_update(adapter, eqo);
4141
4142 reschedule:
4143         adapter->work_counter++;
4144         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4145 }
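
/* Editorial sketch: be_worker() and be_func_recovery_task() both use the
 * self-rescheduling delayed-work idiom -- do one slice of housekeeping,
 * then re-queue for a second later.  Teardown must use
 * cancel_delayed_work_sync() (as be_clear() does) so a handler that is
 * still running is waited for, not raced with.  The skeleton:
 */
static void example_tick(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);

        /* ... periodic housekeeping on 'adapter' ... */

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}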
4146
4147 static bool be_reset_required(struct be_adapter *adapter)
4148 {
4149         return be_find_vfs(adapter, ENABLED) <= 0;
4150 }
4151
4152 static char *mc_name(struct be_adapter *adapter)
4153 {
4154         if (adapter->function_mode & FLEX10_MODE)
4155                 return "FLEX10";
4156         else if (adapter->function_mode & VNIC_MODE)
4157                 return "vNIC";
4158         else if (adapter->function_mode & UMC_ENABLED)
4159                 return "UMC";
4160         else
4161                 return "";
4162 }
4163
4164 static inline char *func_name(struct be_adapter *adapter)
4165 {
4166         return be_physfn(adapter) ? "PF" : "VF";
4167 }
4168
4169 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4170 {
4171         int status = 0;
4172         struct be_adapter *adapter;
4173         struct net_device *netdev;
4174         char port_name;
4175
4176         status = pci_enable_device(pdev);
4177         if (status)
4178                 goto do_none;
4179
4180         status = pci_request_regions(pdev, DRV_NAME);
4181         if (status)
4182                 goto disable_dev;
4183         pci_set_master(pdev);
4184
4185         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4186         if (netdev == NULL) {
4187                 status = -ENOMEM;
4188                 goto rel_reg;
4189         }
4190         adapter = netdev_priv(netdev);
4191         adapter->pdev = pdev;
4192         pci_set_drvdata(pdev, adapter);
4193         adapter->netdev = netdev;
4194         SET_NETDEV_DEV(netdev, &pdev->dev);
4195
4196         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4197         if (!status) {
4198                 netdev->features |= NETIF_F_HIGHDMA;
4199         } else {
4200                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4201                 if (status) {
4202                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4203                         goto free_netdev;
4204                 }
4205         }
4206
4207         status = pci_enable_pcie_error_reporting(pdev);
4208         if (status)
4209                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4210
4211         status = be_ctrl_init(adapter);
4212         if (status)
4213                 goto free_netdev;
4214
4215         /* sync up with fw's ready state */
4216         if (be_physfn(adapter)) {
4217                 status = be_fw_wait_ready(adapter);
4218                 if (status)
4219                         goto ctrl_clean;
4220         }
4221
4222         /* tell fw we're ready to fire cmds */
4223         status = be_cmd_fw_init(adapter);
4224         if (status)
4225                 goto ctrl_clean;
4226
4227         if (be_reset_required(adapter)) {
4228                 status = be_cmd_reset_function(adapter);
4229                 if (status)
4230                         goto ctrl_clean;
4231         }
4232
4233         /* The INTR bit may be set in the card when probed by a kdump kernel
4234          * after a crash.
4235          */
4236         if (!lancer_chip(adapter))
4237                 be_intr_set(adapter, false);
4238
4239         status = be_stats_init(adapter);
4240         if (status)
4241                 goto ctrl_clean;
4242
4243         status = be_get_initial_config(adapter);
4244         if (status)
4245                 goto stats_clean;
4246
4247         INIT_DELAYED_WORK(&adapter->work, be_worker);
4248         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4249         adapter->rx_fc = adapter->tx_fc = true;
4250
4251         status = be_setup(adapter);
4252         if (status)
4253                 goto stats_clean;
4254
4255         be_netdev_init(netdev);
4256         status = register_netdev(netdev);
4257         if (status != 0)
4258                 goto unsetup;
4259
4260         be_roce_dev_add(adapter);
4261
4262         schedule_delayed_work(&adapter->func_recovery_work,
4263                               msecs_to_jiffies(1000));
4264
4265         be_cmd_query_port_name(adapter, &port_name);
4266
4267         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4268                  func_name(adapter), mc_name(adapter), port_name);
4269
4270         return 0;
4271
4272 unsetup:
4273         be_clear(adapter);
4274 stats_clean:
4275         be_stats_cleanup(adapter);
4276 ctrl_clean:
4277         be_ctrl_cleanup(adapter);
4278 free_netdev:
4279         free_netdev(netdev);
4280         pci_set_drvdata(pdev, NULL);
4281 rel_reg:
4282         pci_release_regions(pdev);
4283 disable_dev:
4284         pci_disable_device(pdev);
4285 do_none:
4286         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4287         return status;
4288 }
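
/* Editorial sketch: be_probe() unwinds failures with the kernel's standard
 * goto ladder -- each label undoes exactly the steps that had succeeded,
 * in reverse order, giving one exit path per failure point and no leaked
 * resources.  step_a/b/c and their undo_* helpers are hypothetical:
 */
static int example_probe_shape(void)
{
        int err;

        err = step_a();
        if (err)
                return err;
        err = step_b();
        if (err)
                goto undo_a;
        err = step_c();
        if (err)
                goto undo_b;
        return 0;

undo_b:
        undo_step_b();
undo_a:
        undo_step_a();
        return err;
}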
4289
4290 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4291 {
4292         struct be_adapter *adapter = pci_get_drvdata(pdev);
4293         struct net_device *netdev = adapter->netdev;
4294
4295         if (adapter->wol)
4296                 be_setup_wol(adapter, true);
4297
4298         cancel_delayed_work_sync(&adapter->func_recovery_work);
4299
4300         netif_device_detach(netdev);
4301         if (netif_running(netdev)) {
4302                 rtnl_lock();
4303                 be_close(netdev);
4304                 rtnl_unlock();
4305         }
4306         be_clear(adapter);
4307
4308         pci_save_state(pdev);
4309         pci_disable_device(pdev);
4310         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4311         return 0;
4312 }
4313
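     /* Legacy PCI PM resume hook: the mirror image of be_suspend() --
      * re-enable the device, restore config space, re-arm the FW command
      * path, rebuild queues with be_setup(), reopen the interface if it
      * was running, and restart the recovery worker.
      */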
4314 static int be_resume(struct pci_dev *pdev)
4315 {
4316         int status = 0;
4317         struct be_adapter *adapter = pci_get_drvdata(pdev);
4318         struct net_device *netdev = adapter->netdev;
4319
4320         netif_device_detach(netdev);
4321
4322         status = pci_enable_device(pdev);
4323         if (status)
4324                 return status;
4325
4326         pci_set_power_state(pdev, PCI_D0);
4327         pci_restore_state(pdev);
4328
4329         /* tell fw we're ready to fire cmds */
4330         status = be_cmd_fw_init(adapter);
4331         if (status)
4332                 return status;
4333
4334         status = be_setup(adapter);
             if (status)
                     return status;
4335         if (netif_running(netdev)) {
4336                 rtnl_lock();
4337                 be_open(netdev);
4338                 rtnl_unlock();
4339         }
4340
4341         schedule_delayed_work(&adapter->func_recovery_work,
4342                               msecs_to_jiffies(1000));
4343         netif_device_attach(netdev);
4344
4345         if (adapter->wol)
4346                 be_setup_wol(adapter, false);
4347
4348         return 0;
4349 }
4350
4351 /*
4352  * An FLR (function-level reset) stops BE from DMAing any data.
4353  */
4354 static void be_shutdown(struct pci_dev *pdev)
4355 {
4356         struct be_adapter *adapter = pci_get_drvdata(pdev);
4357
4358         if (!adapter)
4359                 return;
4360
4361         cancel_delayed_work_sync(&adapter->work);
4362         cancel_delayed_work_sync(&adapter->func_recovery_work);
4363
4364         netif_device_detach(adapter->netdev);
4365
4366         be_cmd_reset_function(adapter);
4367
4368         pci_disable_device(pdev);
4369 }
4370
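     /* error_detected() is invoked by the PCI error-recovery core (EEH on
      * pseries, AER elsewhere) when the device's PCI channel fails: mark
      * the error, quiesce the interface and free resources, then ask for a
      * slot reset -- or for disconnection if the failure is permanent.
      */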
4371 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4372                                 pci_channel_state_t state)
4373 {
4374         struct be_adapter *adapter = pci_get_drvdata(pdev);
4375         struct net_device *netdev = adapter->netdev;
4376
4377         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4378
4379         adapter->eeh_error = true;
4380
4381         cancel_delayed_work_sync(&adapter->func_recovery_work);
4382
4383         rtnl_lock();
4384         netif_device_detach(netdev);
4385         rtnl_unlock();
4386
4387         if (netif_running(netdev)) {
4388                 rtnl_lock();
4389                 be_close(netdev);
4390                 rtnl_unlock();
4391         }
4392         be_clear(adapter);
4393
4394         if (state == pci_channel_io_perm_failure)
4395                 return PCI_ERS_RESULT_DISCONNECT;
4396
4397         pci_disable_device(pdev);
4398
4399         /* The error could cause the FW to trigger a flash debug dump.
4400          * Resetting the card while a flash dump is in progress can
4401          * prevent it from recovering; wait for the dump to finish.
4402          * Only the first function needs to wait, as one wait per
4403          * adapter is enough.
4404          */
4405         if (pdev->devfn == 0)
4406                 ssleep(30);
4407
4408         return PCI_ERS_RESULT_NEED_RESET;
4409 }
4410
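     /* slot_reset() runs after the recovery core has reset the PCI slot:
      * re-enable the device with a fresh config space and poll the FW
      * until it reports ready before declaring the slot recovered.
      */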
4411 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4412 {
4413         struct be_adapter *adapter = pci_get_drvdata(pdev);
4414         int status;
4415
4416         dev_info(&adapter->pdev->dev, "EEH reset\n");
4417         be_clear_all_error(adapter);
4418
4419         status = pci_enable_device(pdev);
4420         if (status)
4421                 return PCI_ERS_RESULT_DISCONNECT;
4422
4423         pci_set_master(pdev);
4424         pci_set_power_state(pdev, PCI_D0);
4425         pci_restore_state(pdev);
4426
4427         /* Check if card is ok and fw is ready */
4428         dev_info(&adapter->pdev->dev,
4429                  "Waiting for FW to be ready after EEH reset\n");
4430         status = be_fw_wait_ready(adapter);
4431         if (status)
4432                 return PCI_ERS_RESULT_DISCONNECT;
4433
4434         pci_cleanup_aer_uncorrect_error_status(pdev);
4435         return PCI_ERS_RESULT_RECOVERED;
4436 }
4437
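     /* resume() is the final recovery step: with the slot recovered,
      * rebuild the FW command path and queues, reopen the interface if it
      * was running, and re-attach the netdev so traffic can flow again.
      */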
4438 static void be_eeh_resume(struct pci_dev *pdev)
4439 {
4440         int status = 0;
4441         struct be_adapter *adapter = pci_get_drvdata(pdev);
4442         struct net_device *netdev = adapter->netdev;
4443
4444         dev_info(&adapter->pdev->dev, "EEH resume\n");
4445
4446         pci_save_state(pdev);
4447
4448         /* tell fw we're ready to fire cmds */
4449         status = be_cmd_fw_init(adapter);
4450         if (status)
4451                 goto err;
4452
4453         status = be_cmd_reset_function(adapter);
4454         if (status)
4455                 goto err;
4456
4457         status = be_setup(adapter);
4458         if (status)
4459                 goto err;
4460
4461         if (netif_running(netdev)) {
4462                 status = be_open(netdev);
4463                 if (status)
4464                         goto err;
4465         }
4466
4467         schedule_delayed_work(&adapter->func_recovery_work,
4468                               msecs_to_jiffies(1000));
4469         netif_device_attach(netdev);
4470         return;
4471 err:
4472         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4473 }
4474
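     /* Entry points for the kernel's PCI error-recovery state machine; see
      * Documentation/PCI/pci-error-recovery.txt for the callback contract.
      */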
4475 static const struct pci_error_handlers be_eeh_handlers = {
4476         .error_detected = be_eeh_err_detected,
4477         .slot_reset = be_eeh_reset,
4478         .resume = be_eeh_resume,
4479 };
4480
4481 static struct pci_driver be_driver = {
4482         .name = DRV_NAME,
4483         .id_table = be_dev_ids,
4484         .probe = be_probe,
4485         .remove = be_remove,
4486         .suspend = be_suspend,
4487         .resume = be_resume,
4488         .shutdown = be_shutdown,
4489         .err_handler = &be_eeh_handlers
4490 };
4491
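     /* The RX rings can only be programmed with a handful of fragment
      * sizes, so anything other than 2048/4096/8192 is coerced back to the
      * 2048 default.  Illustrative invocation (the module name comes from
      * DRV_NAME):
      *
      *   modprobe be2net rx_frag_size=4096
      */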
4492 static int __init be_init_module(void)
4493 {
4494         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4495             rx_frag_size != 2048) {
4496                 printk(KERN_WARNING DRV_NAME
4497                        ": Module param rx_frag_size must be 2048/4096/8192."
4498                        " Using 2048\n");
4499                 rx_frag_size = 2048;
4500         }
4501
4502         return pci_register_driver(&be_driver);
4503 }
4504 module_init(be_init_module);
4505
4506 static void __exit be_exit_module(void)
4507 {
4508         pci_unregister_driver(&be_driver);
4509 }
4510 module_exit(be_exit_module);