/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

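/* Doorbell helpers: each ring's doorbell word packs the ring id into the
 * low bits and a count of entries posted (or events/completions popped)
 * shifted into the upper bits, per the masks/shifts used below.  The
 * wmb() ensures ring entries are visible in memory before the doorbell
 * write makes them visible to the hardware.
 */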
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

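	/* Add the new MAC before deleting the old one, so the port is
	 * never left without a valid unicast filter.
	 */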
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

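/* The erx drop counters read below are only 16 bits wide in hardware and
 * wrap at 65535; the driver folds each new reading into a 32-bit
 * accumulator, bumping the high half whenever the low half wraps.
 */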
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

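	/* The stats layout differs by chip: Lancer exposes pport stats,
	 * while BE3 uses the v1 and BE2 the v0 hw-stats layout.
	 */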
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

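	/* The fetch_begin/fetch_retry loops below re-read each queue's
	 * packet/byte counters until a consistent 64-bit snapshot is
	 * obtained (the seqcount protects 32-bit hosts from torn reads).
	 */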
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
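/* Non-Lancer chips expect an even number of WRBs per packet, so a dummy
 * WRB is added to pad an odd count (Lancer has no such restriction).
 */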
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
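	/* On a mapping failure, rewind to the first data WRB and unmap
	 * everything posted so far; only the skb headlen was mapped via
	 * dma_map_single(), hence map_single is cleared after one pass.
	 */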
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

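	/* The unicast list changed: drop every previously programmed UC MAC
	 * (slot 0 stays reserved for the primary MAC) and re-program the
	 * current list, falling back to promiscuous mode if the list no
	 * longer fits in the available pmac slots.
	 */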
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

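/* Walk the PCI bus counting this adapter's VFs.  Each VF's devfn is
 * derived from the PF's devfn plus the SR-IOV capability's VF offset
 * and stride, so matching devfn (and bus number) identifies our VFs.
 */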
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

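/* Adaptive interrupt coalescing: once a second, compute the RX packet
 * rate for this EQ's queue and scale the EQ delay (eqd) up with the
 * rate, clamped to [min_eqd, max_eqd]; very low rates disable coalescing.
 */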
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

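/* A receive page is shared by multiple fragment buffers; it is DMA
 * unmapped only when the last buffer carved from it is consumed,
 * as tracked by last_page_user.
 */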
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

1322 /* Process the RX completion indicated by rxcp when GRO is enabled */
1323 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1324                              struct be_rx_compl_info *rxcp)
1325 {
1326         struct be_adapter *adapter = rxo->adapter;
1327         struct be_rx_page_info *page_info;
1328         struct sk_buff *skb = NULL;
1329         struct be_queue_info *rxq = &rxo->q;
1330         u16 remaining, curr_frag_len;
1331         u16 i, j;
1332
1333         skb = napi_get_frags(napi);
1334         if (!skb) {
1335                 be_rx_compl_discard(rxo, rxcp);
1336                 return;
1337         }
1338
1339         remaining = rxcp->pkt_size;
1340         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1341                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1342
1343                 curr_frag_len = min(remaining, rx_frag_size);
1344
1345                 /* Coalesce all frags from the same physical page in one slot */
1346                 if (i == 0 || page_info->page_offset == 0) {
1347                         /* First frag or Fresh page */
1348                         j++;
1349                         skb_frag_set_page(skb, j, page_info->page);
1350                         skb_shinfo(skb)->frags[j].page_offset =
1351                                                         page_info->page_offset;
1352                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1353                 } else {
1354                         put_page(page_info->page);
1355                 }
1356                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1357                 skb->truesize += rx_frag_size;
1358                 remaining -= curr_frag_len;
1359                 index_inc(&rxcp->rxq_idx, rxq->len);
1360                 memset(page_info, 0, sizeof(*page_info));
1361         }
1362         BUG_ON(j > MAX_SKB_FRAGS);
1363
1364         skb_shinfo(skb)->nr_frags = j + 1;
1365         skb->len = rxcp->pkt_size;
1366         skb->data_len = rxcp->pkt_size;
1367         skb->ip_summed = CHECKSUM_UNNECESSARY;
1368         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369         if (adapter->netdev->features & NETIF_F_RXHASH)
1370                 skb->rxhash = rxcp->rss_hash;
1371
1372         if (rxcp->vlanf)
1373                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1374
1375         napi_gro_frags(napi);
1376 }
1377
1378 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379                                  struct be_rx_compl_info *rxcp)
1380 {
1381         rxcp->pkt_size =
1382                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1386         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1387         rxcp->ip_csum =
1388                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389         rxcp->l4_csum =
1390                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391         rxcp->ipv6 =
1392                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393         rxcp->rxq_idx =
1394                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395         rxcp->num_rcvd =
1396                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397         rxcp->pkt_type =
1398                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1399         rxcp->rss_hash =
1400                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1401         if (rxcp->vlanf) {
1402                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1403                                           compl);
1404                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405                                                compl);
1406         }
1407         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1408 }
1409
1410 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411                                  struct be_rx_compl_info *rxcp)
1412 {
1413         rxcp->pkt_size =
1414                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1418         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1419         rxcp->ip_csum =
1420                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421         rxcp->l4_csum =
1422                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423         rxcp->ipv6 =
1424                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425         rxcp->rxq_idx =
1426                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427         rxcp->num_rcvd =
1428                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429         rxcp->pkt_type =
1430                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1431         rxcp->rss_hash =
1432                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1433         if (rxcp->vlanf) {
1434                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1435                                           compl);
1436                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437                                                compl);
1438         }
1439         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1440 }
1441
1442 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1443 {
1444         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1445         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1446         struct be_adapter *adapter = rxo->adapter;
1447
1448         /* For checking the valid bit, it is OK to use either definition as the
1449          * valid bit is at the same position in both v0 and v1 Rx compl */
1450         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1451                 return NULL;
1452
1453         rmb();
1454         be_dws_le_to_cpu(compl, sizeof(*compl));
1455
1456         if (adapter->be3_native)
1457                 be_parse_rx_compl_v1(compl, rxcp);
1458         else
1459                 be_parse_rx_compl_v0(compl, rxcp);
1460
1461         if (rxcp->vlanf) {
1462                 /* vlanf could be wrongly set in some cards.
1463                  * ignore if vtm is not set */
1464                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1465                         rxcp->vlanf = 0;
1466
1467                 if (!lancer_chip(adapter))
1468                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1469
1470                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1471                     !adapter->vlan_tag[rxcp->vlan_tag])
1472                         rxcp->vlanf = 0;
1473         }
1474
1475         /* As the compl has been parsed, reset it; we won't touch it again */
1476         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1477
1478         queue_tail_inc(&rxo->cq);
1479         return rxcp;
1480 }
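
/*
 * Illustrative sketch, not part of the driver: AMAP offsets are measured
 * in bits (each u8 of an amap struct stands for one bit), so the
 * offsetof(...)/32 expression above selects the 32-bit dword holding the
 * "valid" bit. A consumer of such a ring tests that dword, orders the
 * reads (rmb), parses the entry, and then zeroes the dword so a stale
 * entry is never mistaken for a new one after the ring wraps. The helper
 * name below is invented for illustration.
 */
static inline bool example_compl_valid(const u32 *compl_dw,
				       unsigned int valid_bit_offset)
{
	/* valid_bit_offset in bits, as offsetof() yields on an amap struct */
	return compl_dw[valid_bit_offset / 32] != 0;
}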
1481
1482 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1483 {
1484         u32 order = get_order(size);
1485
1486         if (order > 0)
1487                 gfp |= __GFP_COMP;
1488         return alloc_pages(gfp, order);
1489 }
1490
1491 /*
1492  * Allocate a page, split it to fragments of size rx_frag_size and post as
1493  * receive buffers to BE
1494  */
1495 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1496 {
1497         struct be_adapter *adapter = rxo->adapter;
1498         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1499         struct be_queue_info *rxq = &rxo->q;
1500         struct page *pagep = NULL;
1501         struct be_eth_rx_d *rxd;
1502         u64 page_dmaaddr = 0, frag_dmaaddr;
1503         u32 posted, page_offset = 0;
1504
1505         page_info = &rxo->page_info_tbl[rxq->head];
1506         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507                 if (!pagep) {
1508                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1509                         if (unlikely(!pagep)) {
1510                                 rx_stats(rxo)->rx_post_fail++;
1511                                 break;
1512                         }
1513                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514                                                     0, adapter->big_page_size,
1515                                                     DMA_FROM_DEVICE);
1516                         page_info->page_offset = 0;
1517                 } else {
1518                         get_page(pagep);
1519                         page_info->page_offset = page_offset + rx_frag_size;
1520                 }
1521                 page_offset = page_info->page_offset;
1522                 page_info->page = pagep;
1523                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1524                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526                 rxd = queue_head_node(rxq);
1527                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1529
1530                 /* Any space left in the current big page for another frag? */
1531                 if ((page_offset + rx_frag_size + rx_frag_size) >
1532                                         adapter->big_page_size) {
1533                         pagep = NULL;
1534                         page_info->last_page_user = true;
1535                 }
1536
1537                 prev_page_info = page_info;
1538                 queue_head_inc(rxq);
1539                 page_info = &rxo->page_info_tbl[rxq->head];
1540         }
1541         if (pagep)
1542                 prev_page_info->last_page_user = true;
1543
1544         if (posted) {
1545                 atomic_add(posted, &rxq->used);
1546                 be_rxq_notify(adapter, rxq->id, posted);
1547         } else if (atomic_read(&rxq->used) == 0) {
1548                 /* Let be_worker replenish when memory is available */
1549                 rxo->rx_post_starved = true;
1550         }
1551 }
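
/*
 * Illustrative sketch, not part of the driver: be_post_rx_frags() carves
 * each compound "big page" into rx_frag_size chunks, taking an extra page
 * reference for every chunk after the first and flagging the chunk that
 * exhausts the page as last_page_user. The arithmetic it depends on is
 * simply this (helper name invented for illustration):
 */
static inline u32 example_frags_per_big_page(u32 big_page_size, u32 frag_size)
{
	/* e.g. a 4K big page with the default 2048-byte frags yields 2 */
	return big_page_size / frag_size;
}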
1552
1553 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1554 {
1555         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556
1557         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558                 return NULL;
1559
1560         rmb();
1561         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562
1563         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564
1565         queue_tail_inc(tx_cq);
1566         return txcp;
1567 }
1568
1569 static u16 be_tx_compl_process(struct be_adapter *adapter,
1570                 struct be_tx_obj *txo, u16 last_index)
1571 {
1572         struct be_queue_info *txq = &txo->q;
1573         struct be_eth_wrb *wrb;
1574         struct sk_buff **sent_skbs = txo->sent_skb_list;
1575         struct sk_buff *sent_skb;
1576         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577         bool unmap_skb_hdr = true;
1578
1579         sent_skb = sent_skbs[txq->tail];
1580         BUG_ON(!sent_skb);
1581         sent_skbs[txq->tail] = NULL;
1582
1583         /* skip header wrb */
1584         queue_tail_inc(txq);
1585
1586         do {
1587                 cur_index = txq->tail;
1588                 wrb = queue_tail_node(txq);
1589                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1590                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1591                 unmap_skb_hdr = false;
1592
1593                 num_wrbs++;
1594                 queue_tail_inc(txq);
1595         } while (cur_index != last_index);
1596
1597         kfree_skb(sent_skb);
1598         return num_wrbs;
1599 }
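
/*
 * Illustrative sketch, not part of the driver: a transmitted skb consumes
 * one header wrb plus one data wrb per DMA segment, and the count may be
 * padded with a dummy wrb (cf. the dummy_wrb flag used further below) to
 * keep it even on some chips; that is why the walk above starts num_wrbs
 * at 1 for the header. Helper name and parameters are invented.
 */
static inline int example_wrbs_for_skb(int nr_frags, bool has_linear_data,
				       bool pad_to_even)
{
	int cnt = 1 + nr_frags + (has_linear_data ? 1 : 0);

	if (pad_to_even && (cnt & 1))
		cnt++;		/* dummy wrb to make the count even */
	return cnt;
}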
1600
1601 /* Return the number of events in the event queue */
1602 static inline int events_get(struct be_eq_obj *eqo)
1603 {
1604         struct be_eq_entry *eqe;
1605         int num = 0;
1606
1607         do {
1608                 eqe = queue_tail_node(&eqo->q);
1609                 if (eqe->evt == 0)
1610                         break;
1611
1612                 rmb();
1613                 eqe->evt = 0;
1614                 num++;
1615                 queue_tail_inc(&eqo->q);
1616         } while (true);
1617
1618         return num;
1619 }
1620
1621 static int event_handle(struct be_eq_obj *eqo)
1622 {
1623         bool rearm = false;
1624         int num = events_get(eqo);
1625
1626         /* Deal with any spurious interrupts that come without events */
1627         if (!num)
1628                 rearm = true;
1629
1630         if (num || msix_enabled(eqo->adapter))
1631                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
1633         if (num)
1634                 napi_schedule(&eqo->napi);
1635
1636         return num;
1637 }
1638
1639 /* Leaves the EQ in a disarmed state */
1640 static void be_eq_clean(struct be_eq_obj *eqo)
1641 {
1642         int num = events_get(eqo);
1643
1644         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645 }
1646
1647 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1648 {
1649         struct be_rx_page_info *page_info;
1650         struct be_queue_info *rxq = &rxo->q;
1651         struct be_queue_info *rx_cq = &rxo->cq;
1652         struct be_rx_compl_info *rxcp;
1653         u16 tail;
1654
1655         /* First cleanup pending rx completions */
1656         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1657                 be_rx_compl_discard(rxo, rxcp);
1658                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1659         }
1660
1661         /* Then free posted rx buffers that were not used */
1662         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1663         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1664                 page_info = get_rx_page_info(rxo, tail);
1665                 put_page(page_info->page);
1666                 memset(page_info, 0, sizeof(*page_info));
1667         }
1668         BUG_ON(atomic_read(&rxq->used));
1669         rxq->tail = rxq->head = 0;
1670 }
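
/*
 * Illustrative sketch, not part of the driver: in a ring of length len
 * where head is the next slot to fill and "used" entries are still
 * outstanding, the oldest outstanding index is head - used modulo len;
 * adding len first keeps the unsigned arithmetic non-negative, exactly as
 * in the tail computation above. Helper name invented for illustration.
 */
static inline u16 example_oldest_index(u16 head, u16 used, u16 len)
{
	return (head + len - used) % len;
}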
1671
1672 static void be_tx_compl_clean(struct be_adapter *adapter)
1673 {
1674         struct be_tx_obj *txo;
1675         struct be_queue_info *txq;
1676         struct be_eth_tx_compl *txcp;
1677         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1678         struct sk_buff *sent_skb;
1679         bool dummy_wrb;
1680         int i, pending_txqs;
1681
1682         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1683         do {
1684                 pending_txqs = adapter->num_tx_qs;
1685
1686                 for_all_tx_queues(adapter, txo, i) {
1687                         txq = &txo->q;
1688                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1689                                 end_idx =
1690                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1691                                                       wrb_index, txcp);
1692                                 num_wrbs += be_tx_compl_process(adapter, txo,
1693                                                                 end_idx);
1694                                 cmpl++;
1695                         }
1696                         if (cmpl) {
1697                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1698                                 atomic_sub(num_wrbs, &txq->used);
1699                                 cmpl = 0;
1700                                 num_wrbs = 0;
1701                         }
1702                         if (atomic_read(&txq->used) == 0)
1703                                 pending_txqs--;
1704                 }
1705
1706                 if (pending_txqs == 0 || ++timeo > 200)
1707                         break;
1708
1709                 mdelay(1);
1710         } while (true);
1711
1712         for_all_tx_queues(adapter, txo, i) {
1713                 txq = &txo->q;
1714                 if (atomic_read(&txq->used))
1715                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1716                                 atomic_read(&txq->used));
1717
1718                 /* free posted tx for which compls will never arrive */
1719                 while (atomic_read(&txq->used)) {
1720                         sent_skb = txo->sent_skb_list[txq->tail];
1721                         end_idx = txq->tail;
1722                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1723                                                    &dummy_wrb);
1724                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1725                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1726                         atomic_sub(num_wrbs, &txq->used);
1727                 }
1728         }
1729 }
1730
1731 static void be_evt_queues_destroy(struct be_adapter *adapter)
1732 {
1733         struct be_eq_obj *eqo;
1734         int i;
1735
1736         for_all_evt_queues(adapter, eqo, i) {
1737                 be_eq_clean(eqo);
1738                 if (eqo->q.created)
1739                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740                 be_queue_free(adapter, &eqo->q);
1741         }
1742 }
1743
1744 static int be_evt_queues_create(struct be_adapter *adapter)
1745 {
1746         struct be_queue_info *eq;
1747         struct be_eq_obj *eqo;
1748         int i, rc;
1749
1750         adapter->num_evt_qs = num_irqs(adapter);
1751
1752         for_all_evt_queues(adapter, eqo, i) {
1753                 eqo->adapter = adapter;
1754                 eqo->tx_budget = BE_TX_BUDGET;
1755                 eqo->idx = i;
1756                 eqo->max_eqd = BE_MAX_EQD;
1757                 eqo->enable_aic = true;
1758
1759                 eq = &eqo->q;
1760                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1761                                         sizeof(struct be_eq_entry));
1762                 if (rc)
1763                         return rc;
1764
1765                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1766                 if (rc)
1767                         return rc;
1768         }
1769         return 0;
1770 }
1771
1772 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1773 {
1774         struct be_queue_info *q;
1775
1776         q = &adapter->mcc_obj.q;
1777         if (q->created)
1778                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1779         be_queue_free(adapter, q);
1780
1781         q = &adapter->mcc_obj.cq;
1782         if (q->created)
1783                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1784         be_queue_free(adapter, q);
1785 }
1786
1787 /* Must be called only after TX qs are created as MCC shares TX EQ */
1788 static int be_mcc_queues_create(struct be_adapter *adapter)
1789 {
1790         struct be_queue_info *q, *cq;
1791
1792         cq = &adapter->mcc_obj.cq;
1793         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1794                         sizeof(struct be_mcc_compl)))
1795                 goto err;
1796
1797         /* Use the default EQ for MCC completions */
1798         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1799                 goto mcc_cq_free;
1800
1801         q = &adapter->mcc_obj.q;
1802         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1803                 goto mcc_cq_destroy;
1804
1805         if (be_cmd_mccq_create(adapter, q, cq))
1806                 goto mcc_q_free;
1807
1808         return 0;
1809
1810 mcc_q_free:
1811         be_queue_free(adapter, q);
1812 mcc_cq_destroy:
1813         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1814 mcc_cq_free:
1815         be_queue_free(adapter, cq);
1816 err:
1817         return -1;
1818 }
1819
1820 static void be_tx_queues_destroy(struct be_adapter *adapter)
1821 {
1822         struct be_queue_info *q;
1823         struct be_tx_obj *txo;
1824         u8 i;
1825
1826         for_all_tx_queues(adapter, txo, i) {
1827                 q = &txo->q;
1828                 if (q->created)
1829                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1830                 be_queue_free(adapter, q);
1831
1832                 q = &txo->cq;
1833                 if (q->created)
1834                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1835                 be_queue_free(adapter, q);
1836         }
1837 }
1838
1839 static int be_num_txqs_want(struct be_adapter *adapter)
1840 {
1841         if (sriov_want(adapter) || be_is_mc(adapter) ||
1842             lancer_chip(adapter) || !be_physfn(adapter) ||
1843             adapter->generation == BE_GEN2)
1844                 return 1;
1845         else
1846                 return MAX_TX_QS;
1847 }
1848
1849 static int be_tx_cqs_create(struct be_adapter *adapter)
1850 {
1851         struct be_queue_info *cq, *eq;
1852         int status;
1853         struct be_tx_obj *txo;
1854         u8 i;
1855
1856         adapter->num_tx_qs = be_num_txqs_want(adapter);
1857         if (adapter->num_tx_qs != MAX_TX_QS) {
1858                 rtnl_lock();
1859                 netif_set_real_num_tx_queues(adapter->netdev,
1860                         adapter->num_tx_qs);
1861                 rtnl_unlock();
1862         }
1863
1864         for_all_tx_queues(adapter, txo, i) {
1865                 cq = &txo->cq;
1866                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1867                                         sizeof(struct be_eth_tx_compl));
1868                 if (status)
1869                         return status;
1870
1871                 /* If num_evt_qs is less than num_tx_qs, then more than
1872                  * one txq share an eq
1873                  */
1874                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1875                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1876                 if (status)
1877                         return status;
1878         }
1879         return 0;
1880 }
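
/*
 * Illustrative sketch, not part of the driver: the i % num_evt_qs mapping
 * above distributes TX CQs over the available EQs round-robin, so with
 * e.g. 8 TX queues on 4 EQs, txq0/txq4 land on eq0, txq1/txq5 on eq1, and
 * so on. Helper name invented for illustration.
 */
static inline int example_cq_to_eq(int queue_idx, int num_evt_qs)
{
	return queue_idx % num_evt_qs;	/* index of the servicing EQ */
}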
1881
1882 static int be_tx_qs_create(struct be_adapter *adapter)
1883 {
1884         struct be_tx_obj *txo;
1885         int i, status;
1886
1887         for_all_tx_queues(adapter, txo, i) {
1888                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889                                         sizeof(struct be_eth_wrb));
1890                 if (status)
1891                         return status;
1892
1893                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894                 if (status)
1895                         return status;
1896         }
1897
1898         return 0;
1899 }
1900
1901 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1902 {
1903         struct be_queue_info *q;
1904         struct be_rx_obj *rxo;
1905         int i;
1906
1907         for_all_rx_queues(adapter, rxo, i) {
1908                 q = &rxo->cq;
1909                 if (q->created)
1910                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1911                 be_queue_free(adapter, q);
1912         }
1913 }
1914
1915 static int be_rx_cqs_create(struct be_adapter *adapter)
1916 {
1917         struct be_queue_info *eq, *cq;
1918         struct be_rx_obj *rxo;
1919         int rc, i;
1920
1921         /* We'll create as many RSS rings as there are irqs.
1922          * But when there's only one irq there's no use creating RSS rings
1923          */
1924         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1925                                 num_irqs(adapter) + 1 : 1;
1926         if (adapter->num_rx_qs != MAX_RX_QS) {
1927                 rtnl_lock();
1928                 netif_set_real_num_rx_queues(adapter->netdev,
1929                                              adapter->num_rx_qs);
1930                 rtnl_unlock();
1931         }
1932
1933         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1934         for_all_rx_queues(adapter, rxo, i) {
1935                 rxo->adapter = adapter;
1936                 cq = &rxo->cq;
1937                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1938                                 sizeof(struct be_eth_rx_compl));
1939                 if (rc)
1940                         return rc;
1941
1942                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1943                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1944                 if (rc)
1945                         return rc;
1946         }
1947
1948         if (adapter->num_rx_qs != MAX_RX_QS)
1949                 dev_info(&adapter->pdev->dev,
1950                         "Created only %d receive queues\n", adapter->num_rx_qs);
1951
1952         return 0;
1953 }
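
/*
 * Illustrative sketch, not part of the driver: with multiple irqs the
 * driver creates one RSS ring per irq plus one default (non-RSS) ring;
 * with a single irq only the default ring exists. This mirrors the
 * expression assigning num_rx_qs above.
 */
static inline int example_num_rx_qs(int num_irqs)
{
	return num_irqs > 1 ? num_irqs + 1 : 1;
}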
1954
1955 static irqreturn_t be_intx(int irq, void *dev)
1956 {
1957         struct be_adapter *adapter = dev;
1958         int num_evts;
1959
1960         /* With INTx only one EQ is used */
1961         num_evts = event_handle(&adapter->eq_obj[0]);
1962         if (num_evts)
1963                 return IRQ_HANDLED;
1964         else
1965                 return IRQ_NONE;
1966 }
1967
1968 static irqreturn_t be_msix(int irq, void *dev)
1969 {
1970         struct be_eq_obj *eqo = dev;
1971
1972         event_handle(eqo);
1973         return IRQ_HANDLED;
1974 }
1975
1976 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1977 {
1978         return rxcp->tcpf && !rxcp->err;
1979 }
1980
1981 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1982                         int budget)
1983 {
1984         struct be_adapter *adapter = rxo->adapter;
1985         struct be_queue_info *rx_cq = &rxo->cq;
1986         struct be_rx_compl_info *rxcp;
1987         u32 work_done;
1988
1989         for (work_done = 0; work_done < budget; work_done++) {
1990                 rxcp = be_rx_compl_get(rxo);
1991                 if (!rxcp)
1992                         break;
1993
1994                 /* Is it a flush compl that has no data */
1995                 if (unlikely(rxcp->num_rcvd == 0))
1996                         goto loop_continue;
1997
1998                 /* Discard compl with partial DMA Lancer B0 */
1999                 if (unlikely(!rxcp->pkt_size)) {
2000                         be_rx_compl_discard(rxo, rxcp);
2001                         goto loop_continue;
2002                 }
2003
2004                 /* On BE drop pkts that arrive due to imperfect filtering in
2005                  * promiscuous mode on some SKUs
2006                  */
2007                 if (unlikely(rxcp->port != adapter->port_num &&
2008                                 !lancer_chip(adapter))) {
2009                         be_rx_compl_discard(rxo, rxcp);
2010                         goto loop_continue;
2011                 }
2012
2013                 if (do_gro(rxcp))
2014                         be_rx_compl_process_gro(rxo, napi, rxcp);
2015                 else
2016                         be_rx_compl_process(rxo, rxcp);
2017 loop_continue:
2018                 be_rx_stats_update(rxo, rxcp);
2019         }
2020
2021         if (work_done) {
2022                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2023
2024                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2025                         be_post_rx_frags(rxo, GFP_ATOMIC);
2026         }
2027
2028         return work_done;
2029 }
2030
2031 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2032                           int budget, int idx)
2033 {
2034         struct be_eth_tx_compl *txcp;
2035         int num_wrbs = 0, work_done;
2036
2037         for (work_done = 0; work_done < budget; work_done++) {
2038                 txcp = be_tx_compl_get(&txo->cq);
2039                 if (!txcp)
2040                         break;
2041                 num_wrbs += be_tx_compl_process(adapter, txo,
2042                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2043                                         wrb_index, txcp));
2044         }
2045
2046         if (work_done) {
2047                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2048                 atomic_sub(num_wrbs, &txo->q.used);
2049
2050                 /* As Tx wrbs have been freed up, wake up netdev queue
2051                  * if it was stopped due to lack of tx wrbs.  */
2052                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2053                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2054                         netif_wake_subqueue(adapter->netdev, idx);
2055                 }
2056
2057                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2058                 tx_stats(txo)->tx_compl += work_done;
2059                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2060         }
2061         return (work_done < budget); /* Done */
2062 }
2063
2064 int be_poll(struct napi_struct *napi, int budget)
2065 {
2066         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067         struct be_adapter *adapter = eqo->adapter;
2068         int max_work = 0, work, i;
2069         bool tx_done;
2070
2071         /* Process all TXQs serviced by this EQ */
2072         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2074                                         eqo->tx_budget, i);
2075                 if (!tx_done)
2076                         max_work = budget;
2077         }
2078
2079         /* This loop will iterate twice for EQ0 in which
2080          * completions of the last RXQ (default one) are also processed.
2081          * For other EQs the loop iterates only once.
2082          */
2083         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2084                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2085                 max_work = max(work, max_work);
2086         }
2087
2088         if (is_mcc_eqo(eqo))
2089                 be_process_mcc(adapter);
2090
2091         if (max_work < budget) {
2092                 napi_complete(napi);
2093                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2094         } else {
2095                 /* As we'll continue in polling mode, count and clear events */
2096                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2097         }
2098         return max_work;
2099 }
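
/*
 * Illustrative sketch, not part of the driver: be_poll() visits the queues
 * owned by its EQ with a stride of num_evt_qs, i.e. EQ k services queues
 * k, k + num_evt_qs, k + 2*num_evt_qs, ... Because num_rx_qs is
 * num_evt_qs + 1 in the multi-irq case (see be_rx_cqs_create), only EQ0
 * picks up a second RX queue, the default one, which is what the comment
 * inside be_poll refers to. Helper name invented for illustration.
 */
static inline int example_queues_on_eq(int eq_idx, int num_queues,
				       int num_evt_qs)
{
	int i, n = 0;

	for (i = eq_idx; i < num_queues; i += num_evt_qs)
		n++;		/* queue i is serviced by EQ eq_idx */
	return n;
}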
2100
2101 void be_detect_dump_ue(struct be_adapter *adapter)
2102 {
2103         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2104         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2105         u32 i;
2106
2107         if (adapter->eeh_err || adapter->ue_detected)
2108                 return;
2109
2110         if (lancer_chip(adapter)) {
2111                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2112                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2113                         sliport_err1 = ioread32(adapter->db +
2114                                         SLIPORT_ERROR1_OFFSET);
2115                         sliport_err2 = ioread32(adapter->db +
2116                                         SLIPORT_ERROR2_OFFSET);
2117                 }
2118         } else {
2119                 pci_read_config_dword(adapter->pdev,
2120                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2121                 pci_read_config_dword(adapter->pdev,
2122                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2123                 pci_read_config_dword(adapter->pdev,
2124                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2125                 pci_read_config_dword(adapter->pdev,
2126                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2127
2128                 ue_lo = (ue_lo & (~ue_lo_mask));
2129                 ue_hi = (ue_hi & (~ue_hi_mask));
2130         }
2131
2132         if (ue_lo || ue_hi ||
2133                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2134                 adapter->ue_detected = true;
2135                 adapter->eeh_err = true;
2136                 dev_err(&adapter->pdev->dev,
2137                         "Unrecoverable error in the card\n");
2138         }
2139
2140         if (ue_lo) {
2141                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2142                         if (ue_lo & 1)
2143                                 dev_err(&adapter->pdev->dev,
2144                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2145                 }
2146         }
2147         if (ue_hi) {
2148                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2149                         if (ue_hi & 1)
2150                                 dev_err(&adapter->pdev->dev,
2151                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2152                 }
2153         }
2154
2155         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2156                 dev_err(&adapter->pdev->dev,
2157                         "sliport status 0x%x\n", sliport_status);
2158                 dev_err(&adapter->pdev->dev,
2159                         "sliport error1 0x%x\n", sliport_err1);
2160                 dev_err(&adapter->pdev->dev,
2161                         "sliport error2 0x%x\n", sliport_err2);
2162         }
2163 }
2164
2165 static void be_msix_disable(struct be_adapter *adapter)
2166 {
2167         if (msix_enabled(adapter)) {
2168                 pci_disable_msix(adapter->pdev);
2169                 adapter->num_msix_vec = 0;
2170         }
2171 }
2172
2173 static uint be_num_rss_want(struct be_adapter *adapter)
2174 {
2175         u32 num = 0;
2176         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2177              !sriov_want(adapter) && be_physfn(adapter) &&
2178              !be_is_mc(adapter)) {
2179                 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2180                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2181         }
2182         return num;
2183 }
2184
2185 static void be_msix_enable(struct be_adapter *adapter)
2186 {
2187 #define BE_MIN_MSIX_VECTORS             1
2188         int i, status, num_vec, num_roce_vec = 0;
2189
2190         /* If RSS queues are not used, need a vec for default RX Q */
2191         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2192         if (be_roce_supported(adapter)) {
2193                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2194                                         (num_online_cpus() + 1));
2195                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2196                 num_vec += num_roce_vec;
2197                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2198         }
2199         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2200
2201         for (i = 0; i < num_vec; i++)
2202                 adapter->msix_entries[i].entry = i;
2203
2204         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2205         if (status == 0) {
2206                 goto done;
2207         } else if (status >= BE_MIN_MSIX_VECTORS) {
2208                 num_vec = status;
2209                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2210                                 num_vec) == 0)
2211                         goto done;
2212         }
2213         return;
2214 done:
2215         if (be_roce_supported(adapter)) {
2216                 if (num_vec > num_roce_vec) {
2217                         adapter->num_msix_vec = num_vec - num_roce_vec;
2218                         adapter->num_msix_roce_vec =
2219                                 num_vec - adapter->num_msix_vec;
2220                 } else {
2221                         adapter->num_msix_vec = num_vec;
2222                         adapter->num_msix_roce_vec = 0;
2223                 }
2224         } else {
2225                 adapter->num_msix_vec = num_vec;
2226         }
2227 }
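
/*
 * Illustrative sketch, not part of the driver: the legacy pci_enable_msix()
 * used above returns 0 on success, a negative errno on failure, or a
 * positive count of the vectors actually available when fewer than
 * requested could be granted. The enable path follows the classic "retry
 * once with the offered count" pattern, sketched here with invented names:
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int want, int min)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0 && ret >= min)		/* fewer vectors offered */
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;				/* 0 means success */
}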
2228
2229 static inline int be_msix_vec_get(struct be_adapter *adapter,
2230                                 struct be_eq_obj *eqo)
2231 {
2232         return adapter->msix_entries[eqo->idx].vector;
2233 }
2234
2235 static int be_msix_register(struct be_adapter *adapter)
2236 {
2237         struct net_device *netdev = adapter->netdev;
2238         struct be_eq_obj *eqo;
2239         int status, i, vec;
2240
2241         for_all_evt_queues(adapter, eqo, i) {
2242                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2243                 vec = be_msix_vec_get(adapter, eqo);
2244                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2245                 if (status)
2246                         goto err_msix;
2247         }
2248
2249         return 0;
2250 err_msix:
2251         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2252                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2253         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2254                 status);
2255         be_msix_disable(adapter);
2256         return status;
2257 }
2258
2259 static int be_irq_register(struct be_adapter *adapter)
2260 {
2261         struct net_device *netdev = adapter->netdev;
2262         int status;
2263
2264         if (msix_enabled(adapter)) {
2265                 status = be_msix_register(adapter);
2266                 if (status == 0)
2267                         goto done;
2268                 /* INTx is not supported for VF */
2269                 if (!be_physfn(adapter))
2270                         return status;
2271         }
2272
2273         /* INTx */
2274         netdev->irq = adapter->pdev->irq;
2275         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2276                         adapter);
2277         if (status) {
2278                 dev_err(&adapter->pdev->dev,
2279                         "INTx request IRQ failed - err %d\n", status);
2280                 return status;
2281         }
2282 done:
2283         adapter->isr_registered = true;
2284         return 0;
2285 }
2286
2287 static void be_irq_unregister(struct be_adapter *adapter)
2288 {
2289         struct net_device *netdev = adapter->netdev;
2290         struct be_eq_obj *eqo;
2291         int i;
2292
2293         if (!adapter->isr_registered)
2294                 return;
2295
2296         /* INTx */
2297         if (!msix_enabled(adapter)) {
2298                 free_irq(netdev->irq, adapter);
2299                 goto done;
2300         }
2301
2302         /* MSIx */
2303         for_all_evt_queues(adapter, eqo, i)
2304                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2305
2306 done:
2307         adapter->isr_registered = false;
2308 }
2309
2310 static void be_rx_qs_destroy(struct be_adapter *adapter)
2311 {
2312         struct be_queue_info *q;
2313         struct be_rx_obj *rxo;
2314         int i;
2315
2316         for_all_rx_queues(adapter, rxo, i) {
2317                 q = &rxo->q;
2318                 if (q->created) {
2319                         be_cmd_rxq_destroy(adapter, q);
2320                         /* After the rxq is invalidated, wait for a grace time
2321                          * of 1ms for all dma to end and the flush compl to
2322                          * arrive
2323                          */
2324                         mdelay(1);
2325                         be_rx_cq_clean(rxo);
2326                 }
2327                 be_queue_free(adapter, q);
2328         }
2329 }
2330
2331 static int be_close(struct net_device *netdev)
2332 {
2333         struct be_adapter *adapter = netdev_priv(netdev);
2334         struct be_eq_obj *eqo;
2335         int i;
2336
2337         be_roce_dev_close(adapter);
2338
2339         be_async_mcc_disable(adapter);
2340
2341         if (!lancer_chip(adapter))
2342                 be_intr_set(adapter, false);
2343
2344         for_all_evt_queues(adapter, eqo, i) {
2345                 napi_disable(&eqo->napi);
2346                 if (msix_enabled(adapter))
2347                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2348                 else
2349                         synchronize_irq(netdev->irq);
2350                 be_eq_clean(eqo);
2351         }
2352
2353         be_irq_unregister(adapter);
2354
2355         /* Wait for all pending tx completions to arrive so that
2356          * all tx skbs are freed.
2357          */
2358         be_tx_compl_clean(adapter);
2359
2360         be_rx_qs_destroy(adapter);
2361         return 0;
2362 }
2363
2364 static int be_rx_qs_create(struct be_adapter *adapter)
2365 {
2366         struct be_rx_obj *rxo;
2367         int rc, i, j;
2368         u8 rsstable[128];
2369
2370         for_all_rx_queues(adapter, rxo, i) {
2371                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2372                                     sizeof(struct be_eth_rx_d));
2373                 if (rc)
2374                         return rc;
2375         }
2376
2377         /* The FW would like the default RXQ to be created first */
2378         rxo = default_rxo(adapter);
2379         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2380                                adapter->if_handle, false, &rxo->rss_id);
2381         if (rc)
2382                 return rc;
2383
2384         for_all_rss_queues(adapter, rxo, i) {
2385                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2386                                        rx_frag_size, adapter->if_handle,
2387                                        true, &rxo->rss_id);
2388                 if (rc)
2389                         return rc;
2390         }
2391
2392         if (be_multi_rxq(adapter)) {
2393                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2394                         for_all_rss_queues(adapter, rxo, i) {
2395                                 if ((j + i) >= 128)
2396                                         break;
2397                                 rsstable[j + i] = rxo->rss_id;
2398                         }
2399                 }
2400                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2401                 if (rc)
2402                         return rc;
2403         }
2404
2405         /* First time posting */
2406         for_all_rx_queues(adapter, rxo, i)
2407                 be_post_rx_frags(rxo, GFP_KERNEL);
2408         return 0;
2409 }
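
/*
 * Illustrative sketch, not part of the driver: the nested loops above fill
 * the 128-entry RSS indirection table round-robin with the rss_id of each
 * RSS ring (the default ring is excluded), so with four RSS rings the
 * table reads id0, id1, id2, id3, id0, id1, ... for all 128 slots. The
 * equivalent direct computation, with invented names:
 */
static inline void example_fill_rss_table(u8 *table, int table_len,
					  const u8 *rss_ids, int num_rss)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = rss_ids[i % num_rss];
}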
2410
2411 static int be_open(struct net_device *netdev)
2412 {
2413         struct be_adapter *adapter = netdev_priv(netdev);
2414         struct be_eq_obj *eqo;
2415         struct be_rx_obj *rxo;
2416         struct be_tx_obj *txo;
2417         u8 link_status;
2418         int status, i;
2419
2420         status = be_rx_qs_create(adapter);
2421         if (status)
2422                 goto err;
2423
2424         be_irq_register(adapter);
2425
2426         if (!lancer_chip(adapter))
2427                 be_intr_set(adapter, true);
2428
2429         for_all_rx_queues(adapter, rxo, i)
2430                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2431
2432         for_all_tx_queues(adapter, txo, i)
2433                 be_cq_notify(adapter, txo->cq.id, true, 0);
2434
2435         be_async_mcc_enable(adapter);
2436
2437         for_all_evt_queues(adapter, eqo, i) {
2438                 napi_enable(&eqo->napi);
2439                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2440         }
2441
2442         status = be_cmd_link_status_query(adapter, NULL, NULL,
2443                                           &link_status, 0);
2444         if (!status)
2445                 be_link_status_update(adapter, link_status);
2446
2447         be_roce_dev_open(adapter);
2448         return 0;
2449 err:
2450         be_close(adapter->netdev);
2451         return -EIO;
2452 }
2453
2454 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2455 {
2456         struct be_dma_mem cmd;
2457         int status = 0;
2458         u8 mac[ETH_ALEN];
2459
2460         memset(mac, 0, ETH_ALEN);
2461
2462         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2463         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2464                                     GFP_KERNEL);
2465         if (cmd.va == NULL)
2466                 return -1;
2467         memset(cmd.va, 0, cmd.size);
2468
2469         if (enable) {
2470                 status = pci_write_config_dword(adapter->pdev,
2471                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2472                 if (status) {
2473                         dev_err(&adapter->pdev->dev,
2474                                 "Could not enable Wake-on-LAN\n");
2475                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2476                                           cmd.dma);
2477                         return status;
2478                 }
2479                 status = be_cmd_enable_magic_wol(adapter,
2480                                 adapter->netdev->dev_addr, &cmd);
2481                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2482                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2483         } else {
2484                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2485                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2486                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2487         }
2488
2489         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2490         return status;
2491 }
2492
2493 /*
2494  * Generate a seed MAC address from the PF MAC Address using jhash.
2495  * MAC addresses for VFs are assigned incrementally starting from the seed.
2496  * These addresses are programmed in the ASIC by the PF and the VF driver
2497  * queries for the MAC address during its probe.
2498  */
2499 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2500 {
2501         u32 vf;
2502         int status = 0;
2503         u8 mac[ETH_ALEN];
2504         struct be_vf_cfg *vf_cfg;
2505
2506         be_vf_eth_addr_generate(adapter, mac);
2507
2508         for_all_vfs(adapter, vf_cfg, vf) {
2509                 if (lancer_chip(adapter)) {
2510                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2511                 } else {
2512                         status = be_cmd_pmac_add(adapter, mac,
2513                                                  vf_cfg->if_handle,
2514                                                  &vf_cfg->pmac_id, vf + 1);
2515                 }
2516
2517                 if (status)
2518                         dev_err(&adapter->pdev->dev,
2519                         "MAC address assignment failed for VF %d\n", vf);
2520                 else
2521                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2522
2523                 mac[5] += 1;
2524         }
2525         return status;
2526 }
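
/*
 * Illustrative sketch, not part of the driver: each VF's MAC is the
 * jhash-derived seed plus the VF number in the last octet, matching the
 * mac[5] += 1 step above (which assumes fewer than 256 VFs and no carry
 * into the higher octets). Helper name invented for illustration.
 */
static inline void example_vf_mac(u8 *mac, const u8 *seed, int vf_num)
{
	memcpy(mac, seed, ETH_ALEN);
	mac[5] += vf_num;	/* per-VF increment of the last octet */
}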
2527
2528 static void be_vf_clear(struct be_adapter *adapter)
2529 {
2530         struct be_vf_cfg *vf_cfg;
2531         u32 vf;
2532
2533         if (be_find_vfs(adapter, ASSIGNED)) {
2534                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2535                 goto done;
2536         }
2537
2538         for_all_vfs(adapter, vf_cfg, vf) {
2539                 if (lancer_chip(adapter))
2540                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2541                 else
2542                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2543                                         vf_cfg->pmac_id, vf + 1);
2544
2545                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2546         }
2547         pci_disable_sriov(adapter->pdev);
2548 done:
2549         kfree(adapter->vf_cfg);
2550         adapter->num_vfs = 0;
2551 }
2552
2553 static int be_clear(struct be_adapter *adapter)
2554 {
2555         int i = 1;
2556
2557         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2558                 cancel_delayed_work_sync(&adapter->work);
2559                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2560         }
2561
2562         if (sriov_enabled(adapter))
2563                 be_vf_clear(adapter);
2564
2565         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2566                 be_cmd_pmac_del(adapter, adapter->if_handle,
2567                         adapter->pmac_id[i], 0);
2568
2569         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2570
2571         be_mcc_queues_destroy(adapter);
2572         be_rx_cqs_destroy(adapter);
2573         be_tx_queues_destroy(adapter);
2574         be_evt_queues_destroy(adapter);
2575
2576         /* tell fw we're done with firing cmds */
2577         be_cmd_fw_clean(adapter);
2578
2579         be_msix_disable(adapter);
2580         return 0;
2581 }
2582
2583 static int be_vf_setup_init(struct be_adapter *adapter)
2584 {
2585         struct be_vf_cfg *vf_cfg;
2586         int vf;
2587
2588         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2589                                   GFP_KERNEL);
2590         if (!adapter->vf_cfg)
2591                 return -ENOMEM;
2592
2593         for_all_vfs(adapter, vf_cfg, vf) {
2594                 vf_cfg->if_handle = -1;
2595                 vf_cfg->pmac_id = -1;
2596         }
2597         return 0;
2598 }
2599
2600 static int be_vf_setup(struct be_adapter *adapter)
2601 {
2602         struct be_vf_cfg *vf_cfg;
2603         struct device *dev = &adapter->pdev->dev;
2604         u32 cap_flags, en_flags, vf;
2605         u16 def_vlan, lnk_speed;
2606         int status, enabled_vfs;
2607
2608         enabled_vfs = be_find_vfs(adapter, ENABLED);
2609         if (enabled_vfs) {
2610                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2611                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2612                 return 0;
2613         }
2614
2615         if (num_vfs > adapter->dev_num_vfs) {
2616                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2617                          adapter->dev_num_vfs, num_vfs);
2618                 num_vfs = adapter->dev_num_vfs;
2619         }
2620
2621         status = pci_enable_sriov(adapter->pdev, num_vfs);
2622         if (!status) {
2623                 adapter->num_vfs = num_vfs;
2624         } else {
2625                 /* Platform doesn't support SRIOV though device supports it */
2626                 dev_warn(dev, "SRIOV enable failed\n");
2627                 return 0;
2628         }
2629
2630         status = be_vf_setup_init(adapter);
2631         if (status)
2632                 goto err;
2633
2634         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2635                                 BE_IF_FLAGS_MULTICAST;
2636         for_all_vfs(adapter, vf_cfg, vf) {
2637                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2638                                           &vf_cfg->if_handle, vf + 1);
2639                 if (status)
2640                         goto err;
2641         }
2642
2643         if (!enabled_vfs) {
2644                 status = be_vf_eth_addr_config(adapter);
2645                 if (status)
2646                         goto err;
2647         }
2648
2649         for_all_vfs(adapter, vf_cfg, vf) {
2650                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2651                                                   NULL, vf + 1);
2652                 if (status)
2653                         goto err;
2654                 vf_cfg->tx_rate = lnk_speed * 10;
2655
2656                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2657                                 vf + 1, vf_cfg->if_handle);
2658                 if (status)
2659                         goto err;
2660                 vf_cfg->def_vid = def_vlan;
2661         }
2662         return 0;
2663 err:
2664         return status;
2665 }
2666
2667 static void be_setup_init(struct be_adapter *adapter)
2668 {
2669         adapter->vlan_prio_bmap = 0xff;
2670         adapter->phy.link_speed = -1;
2671         adapter->if_handle = -1;
2672         adapter->be3_native = false;
2673         adapter->promiscuous = false;
2674         adapter->eq_next_idx = 0;
2675         adapter->phy.forced_port_speed = -1;
2676 }
2677
2678 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2679                            bool *active_mac, u32 *pmac_id)
2680 {
2681         int status = 0;
2682
2683         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2684                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2685                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2686                         *active_mac = true;
2687                 else
2688                         *active_mac = false;
2689
2690                 return status;
2691         }
2692
2693         if (lancer_chip(adapter)) {
2694                 status = be_cmd_get_mac_from_list(adapter, mac,
2695                                                   active_mac, pmac_id, 0);
2696                 if (*active_mac) {
2697                         status = be_cmd_mac_addr_query(adapter, mac,
2698                                                        MAC_ADDRESS_TYPE_NETWORK,
2699                                                        false, if_handle,
2700                                                        *pmac_id);
2701                 }
2702         } else if (be_physfn(adapter)) {
2703                 /* For BE3, for PF get permanent MAC */
2704                 status = be_cmd_mac_addr_query(adapter, mac,
2705                                                MAC_ADDRESS_TYPE_NETWORK, true,
2706                                                0, 0);
2707                 *active_mac = false;
2708         } else {
2709                 /* For BE3, for VF get soft MAC assigned by PF */
2710                 status = be_cmd_mac_addr_query(adapter, mac,
2711                                                MAC_ADDRESS_TYPE_NETWORK, false,
2712                                                if_handle, 0);
2713                 *active_mac = true;
2714         }
2715         return status;
2716 }
2717
2718 /* Routine to query per function resource limits */
2719 static int be_get_config(struct be_adapter *adapter)
2720 {
2721         int pos;
2722         u16 dev_num_vfs;
2723
2724         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2725         if (pos) {
2726                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2727                                      &dev_num_vfs);
2728                 adapter->dev_num_vfs = dev_num_vfs;
2729         }
2730         return 0;
2731 }
2732
2733 static int be_setup(struct be_adapter *adapter)
2734 {
2735         struct device *dev = &adapter->pdev->dev;
2736         u32 cap_flags, en_flags;
2737         u32 tx_fc, rx_fc;
2738         int status;
2739         u8 mac[ETH_ALEN];
2740         bool active_mac;
2741
2742         be_setup_init(adapter);
2743
2744         be_get_config(adapter);
2745
2746         be_cmd_req_native_mode(adapter);
2747
2748         be_msix_enable(adapter);
2749
2750         status = be_evt_queues_create(adapter);
2751         if (status)
2752                 goto err;
2753
2754         status = be_tx_cqs_create(adapter);
2755         if (status)
2756                 goto err;
2757
2758         status = be_rx_cqs_create(adapter);
2759         if (status)
2760                 goto err;
2761
2762         status = be_mcc_queues_create(adapter);
2763         if (status)
2764                 goto err;
2765
2766         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2767                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2768         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2769                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2770
2771         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2772                 cap_flags |= BE_IF_FLAGS_RSS;
2773                 en_flags |= BE_IF_FLAGS_RSS;
2774         }
2775
2776         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2777                                   &adapter->if_handle, 0);
2778         if (status != 0)
2779                 goto err;
2780
2781         memset(mac, 0, ETH_ALEN);
2782         active_mac = false;
2783         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2784                                  &active_mac, &adapter->pmac_id[0]);
2785         if (status != 0)
2786                 goto err;
2787
2788         if (!active_mac) {
2789                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2790                                          &adapter->pmac_id[0], 0);
2791                 if (status != 0)
2792                         goto err;
2793         }
2794
2795         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2796                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2797                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2798         }
2799
2800         status = be_tx_qs_create(adapter);
2801         if (status)
2802                 goto err;
2803
2804         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2805
2806         if (adapter->vlans_added)
2807                 be_vid_config(adapter);
2808
2809         be_set_rx_mode(adapter->netdev);
2810
2811         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2812
2813         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2814                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2815                                         adapter->rx_fc);
2816
2817         if (be_physfn(adapter) && num_vfs) {
2818                 if (adapter->dev_num_vfs)
2819                         be_vf_setup(adapter);
2820                 else
2821                         dev_warn(dev, "device doesn't support SRIOV\n");
2822         }
2823
2824         be_cmd_get_phy_info(adapter);
2825         if (be_pause_supported(adapter))
2826                 adapter->phy.fc_autoneg = 1;
2827
2828         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2829         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2830         return 0;
2831 err:
2832         be_clear(adapter);
2833         return status;
2834 }
2835
2836 #ifdef CONFIG_NET_POLL_CONTROLLER
2837 static void be_netpoll(struct net_device *netdev)
2838 {
2839         struct be_adapter *adapter = netdev_priv(netdev);
2840         struct be_eq_obj *eqo;
2841         int i;
2842
2843         for_all_evt_queues(adapter, eqo, i)
2844                 event_handle(eqo);
2845
2846         return;
2847 }
2848 #endif
2849
2850 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2851 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2852
2853 static bool be_flash_redboot(struct be_adapter *adapter,
2854                         const u8 *p, u32 img_start, int image_size,
2855                         int hdr_size)
2856 {
2857         u32 crc_offset;
2858         u8 flashed_crc[4];
2859         int status;
2860
2861         crc_offset = hdr_size + img_start + image_size - 4;
2862
2863         p += crc_offset;
2864
2865         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2866                         (image_size - 4));
2867         if (status) {
2868                 dev_err(&adapter->pdev->dev,
2869                 "could not get crc from flash, not flashing redboot\n");
2870                 return false;
2871         }
2872
2873         /* update redboot only if crc does not match */
2874         if (!memcmp(flashed_crc, p, 4))
2875                 return false;
2876         else
2877                 return true;
2878 }
2879
2880 static bool phy_flashing_required(struct be_adapter *adapter)
2881 {
2882         return (adapter->phy.phy_type == TN_8022 &&
2883                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2884 }
2885
2886 static bool is_comp_in_ufi(struct be_adapter *adapter,
2887                            struct flash_section_info *fsec, int type)
2888 {
2889         int i = 0, img_type = 0;
2890         struct flash_section_info_g2 *fsec_g2 = NULL;
2891
2892         if (adapter->generation != BE_GEN3)
2893                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2894
2895         for (i = 0; i < MAX_FLASH_COMP; i++) {
2896                 if (fsec_g2)
2897                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2898                 else
2899                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2900
2901                 if (img_type == type)
2902                         return true;
2903         }
2904         return false;
2906 }
2907
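/* Walk the UFI payload past the file and image headers, scanning in 32-byte
 * steps for the flash-section cookie that marks the flash directory.
 * Returns NULL when no directory is found (e.g. a truncated image).
 */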
2908 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2909                                                 int header_size,
2910                                                 const struct firmware *fw)
2911 {
2912         struct flash_section_info *fsec = NULL;
2913         const u8 *p = fw->data;
2914
2915         p += header_size;
2916         while (p < (fw->data + fw->size)) {
2917                 fsec = (struct flash_section_info *)p;
2918                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2919                         return fsec;
2920                 p += 32;
2921         }
2922         return NULL;
2923 }
2924
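/* Flash every component listed in the generation-specific table below that
 * is also present in the UFI's flash directory. Each component is written
 * in 32KB pieces: every piece except the last is sent with a SAVE opcode
 * (staged on the card) and the final piece uses a FLASH opcode, which
 * commits the staged data. For example, a 100KB component is sent as three
 * 32KB SAVE writes followed by one 4KB FLASH write.
 */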
2925 static int be_flash_data(struct be_adapter *adapter,
2926                          const struct firmware *fw,
2927                          struct be_dma_mem *flash_cmd,
2928                          int num_of_images)
2930 {
2931         int status = 0, i, filehdr_size = 0;
2932         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2933         u32 total_bytes = 0, flash_op;
2934         int num_bytes;
2935         const u8 *p = fw->data;
2936         struct be_cmd_write_flashrom *req = flash_cmd->va;
2937         const struct flash_comp *pflashcomp;
2938         int num_comp, hdr_size;
2939         struct flash_section_info *fsec = NULL;
2940
2941         struct flash_comp gen3_flash_types[] = {
2942                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2943                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2944                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2945                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2946                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2947                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2948                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2949                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2950                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2951                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2952                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2953                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2954                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2955                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2956                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2957                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2958                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2959                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2960                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2961                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2962         };
2963
2964         struct flash_comp gen2_flash_types[] = {
2965                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2966                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2967                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2968                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2969                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2970                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2971                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2972                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2973                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2974                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2975                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2976                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2977                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2978                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2979                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2980                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2981         };
2982
2983         if (adapter->generation == BE_GEN3) {
2984                 pflashcomp = gen3_flash_types;
2985                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2986                 num_comp = ARRAY_SIZE(gen3_flash_types);
2987         } else {
2988                 pflashcomp = gen2_flash_types;
2989                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2990                 num_comp = ARRAY_SIZE(gen2_flash_types);
2991         }
2992         /* Get flash section info */
2993         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2994         if (!fsec) {
2995                 dev_err(&adapter->pdev->dev,
2996                         "Invalid Cookie. UFI corrupted?\n");
2997                 return -1;
2998         }
2999         for (i = 0; i < num_comp; i++) {
3000                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3001                         continue;
3002
3003                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3004                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3005                         continue;
3006
3007                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3008                     !phy_flashing_required(adapter))
3009                         continue;
3011
3012                 hdr_size = filehdr_size + img_hdrs_size;
3014
3015                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3016                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3017                                        pflashcomp[i].size, hdr_size)))
3018                         continue;
3019
3020                 /* Flash the component */
3021                 p = fw->data;
3022                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3023                 if (p + pflashcomp[i].size > fw->data + fw->size)
3024                         return -1;
3025                 total_bytes = pflashcomp[i].size;
3026                 while (total_bytes) {
3027                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
3031                         total_bytes -= num_bytes;
3032                         if (!total_bytes) {
3033                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3034                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3035                                 else
3036                                         flash_op = FLASHROM_OPER_FLASH;
3037                         } else {
3038                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3039                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3040                                 else
3041                                         flash_op = FLASHROM_OPER_SAVE;
3042                         }
3043                         memcpy(req->params.data_buf, p, num_bytes);
3044                         p += num_bytes;
3045                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3046                                 pflashcomp[i].optype, flash_op, num_bytes);
3047                         if (status) {
3048                                 if ((status == ILLEGAL_IOCTL_REQ) &&
3049                                         (pflashcomp[i].optype ==
3050                                                 OPTYPE_PHY_FW))
3051                                         break;
3052                                 dev_err(&adapter->pdev->dev,
3053                                         "cmd to write to flash rom failed.\n");
3054                                 return -1;
3055                         }
3056                 }
3057         }
3058         return 0;
3059 }
3060
3061 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3062 {
3063         if (fhdr == NULL)
3064                 return 0;
3065         if (fhdr->build[0] == '3')
3066                 return BE_GEN3;
3067         else if (fhdr->build[0] == '2')
3068                 return BE_GEN2;
3069         else
3070                 return 0;
3071 }
3072
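/* Lancer uses a write_object command stream instead of the BE2/BE3 flashrom
 * opcodes: the image is pushed to the "/prg" object in 32KB chunks and then
 * committed with a final zero-length write at the accumulated offset.
 */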
3073 static int lancer_fw_download(struct be_adapter *adapter,
3074                                 const struct firmware *fw)
3075 {
3076 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3077 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3078         struct be_dma_mem flash_cmd;
3079         const u8 *data_ptr = NULL;
3080         u8 *dest_image_ptr = NULL;
3081         size_t image_size = 0;
3082         u32 chunk_size = 0;
3083         u32 data_written = 0;
3084         u32 offset = 0;
3085         int status = 0;
3086         u8 add_status = 0;
3087
3088         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3089                 dev_err(&adapter->pdev->dev,
3090                         "FW image not properly aligned. "
3091                         "Length must be 4-byte aligned.\n");
3092                 status = -EINVAL;
3093                 goto lancer_fw_exit;
3094         }
3095
3096         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3097                                 + LANCER_FW_DOWNLOAD_CHUNK;
3098         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3099                                                 &flash_cmd.dma, GFP_KERNEL);
3100         if (!flash_cmd.va) {
3101                 status = -ENOMEM;
3102                 dev_err(&adapter->pdev->dev,
3103                         "Memory allocation failure while flashing\n");
3104                 goto lancer_fw_exit;
3105         }
3106
3107         dest_image_ptr = flash_cmd.va +
3108                                 sizeof(struct lancer_cmd_req_write_object);
3109         image_size = fw->size;
3110         data_ptr = fw->data;
3111
3112         while (image_size) {
3113                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3114
3115                 /* Copy the image chunk content. */
3116                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3117
3118                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3119                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3120                                 &data_written, &add_status);
3121
3122                 if (status)
3123                         break;
3124
3125                 offset += data_written;
3126                 data_ptr += data_written;
3127                 image_size -= data_written;
3128         }
3129
3130         if (!status) {
3131                 /* Commit the FW written */
3132                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3133                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3134                                         &data_written, &add_status);
3135         }
3136
3137         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3138                                 flash_cmd.dma);
3139         if (status) {
3140                 dev_err(&adapter->pdev->dev,
3141                         "Firmware load error. "
3142                         "Status code: 0x%x Additional Status: 0x%x\n",
3143                         status, add_status);
3144                 goto lancer_fw_exit;
3145         }
3146
3147         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3148 lancer_fw_exit:
3149         return status;
3150 }
3151
3152 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3153 {
3154         struct flash_file_hdr_g2 *fhdr;
3155         struct flash_file_hdr_g3 *fhdr3;
3156         struct image_hdr *img_hdr_ptr = NULL;
3157         struct be_dma_mem flash_cmd;
3158         const u8 *p;
3159         int status = 0, i = 0, num_imgs = 0;
3160
3161         p = fw->data;
3162         fhdr = (struct flash_file_hdr_g2 *) p;
3163
3164         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3165         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3166                                           &flash_cmd.dma, GFP_KERNEL);
3167         if (!flash_cmd.va) {
3168                 status = -ENOMEM;
3169                 dev_err(&adapter->pdev->dev,
3170                         "Memory allocation failure while flashing\n");
3171                 goto be_fw_exit;
3172         }
3173
3174         if ((adapter->generation == BE_GEN3) &&
3175                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3176                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3177                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3178                 for (i = 0; i < num_imgs; i++) {
3179                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3180                                         (sizeof(struct flash_file_hdr_g3) +
3181                                          i * sizeof(struct image_hdr)));
3182                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3183                                 status = be_flash_data(adapter, fw, &flash_cmd,
3184                                                         num_imgs);
3185                 }
3186         } else if ((adapter->generation == BE_GEN2) &&
3187                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3188                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3189         } else {
3190                 dev_err(&adapter->pdev->dev,
3191                         "UFI and interface are not compatible for flashing\n");
3192                 status = -1;
3193         }
3194
3195         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3196                           flash_cmd.dma);
3197         if (status) {
3198                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3199                 goto be_fw_exit;
3200         }
3201
3202         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3203
3204 be_fw_exit:
3205         return status;
3206 }
3207
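/* Entry point for firmware flashing; dispatches to the Lancer or BE2/BE3
 * download path above. This is normally reached from the ethtool
 * flash_device hook, e.g. "ethtool -f <iface> <fw_file>" from user space
 * (a usage sketch; the ethtool wiring lives outside this file).
 */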
3208 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3209 {
3210         const struct firmware *fw;
3211         int status;
3212
3213         if (!netif_running(adapter->netdev)) {
3214                 dev_err(&adapter->pdev->dev,
3215                         "Firmware load not allowed (interface is down)\n");
3216                 return -1;
3217         }
3218
3219         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3220         if (status)
3221                 goto fw_exit;
3222
3223         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3224
3225         if (lancer_chip(adapter))
3226                 status = lancer_fw_download(adapter, fw);
3227         else
3228                 status = be_fw_download(adapter, fw);
3229
3230 fw_exit:
3231         release_firmware(fw);
3232         return status;
3233 }
3234
3235 static const struct net_device_ops be_netdev_ops = {
3236         .ndo_open               = be_open,
3237         .ndo_stop               = be_close,
3238         .ndo_start_xmit         = be_xmit,
3239         .ndo_set_rx_mode        = be_set_rx_mode,
3240         .ndo_set_mac_address    = be_mac_addr_set,
3241         .ndo_change_mtu         = be_change_mtu,
3242         .ndo_get_stats64        = be_get_stats64,
3243         .ndo_validate_addr      = eth_validate_addr,
3244         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3245         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3246         .ndo_set_vf_mac         = be_set_vf_mac,
3247         .ndo_set_vf_vlan        = be_set_vf_vlan,
3248         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3249         .ndo_get_vf_config      = be_get_vf_config,
3250 #ifdef CONFIG_NET_POLL_CONTROLLER
3251         .ndo_poll_controller    = be_netpoll,
3252 #endif
3253 };
3254
3255 static void be_netdev_init(struct net_device *netdev)
3256 {
3257         struct be_adapter *adapter = netdev_priv(netdev);
3258         struct be_eq_obj *eqo;
3259         int i;
3260
3261         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3262                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3263                 NETIF_F_HW_VLAN_TX;
3264         if (be_multi_rxq(adapter))
3265                 netdev->hw_features |= NETIF_F_RXHASH;
3266
3267         netdev->features |= netdev->hw_features |
3268                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3269
3270         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3271                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3272
3273         netdev->priv_flags |= IFF_UNICAST_FLT;
3274
3275         netdev->flags |= IFF_MULTICAST;
3276
3277         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3278
3279         netdev->netdev_ops = &be_netdev_ops;
3280
3281         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3282
3283         for_all_evt_queues(adapter, eqo, i)
3284                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3285 }
3286
3287 static void be_unmap_pci_bars(struct be_adapter *adapter)
3288 {
3289         if (adapter->csr)
3290                 iounmap(adapter->csr);
3291         if (adapter->db)
3292                 iounmap(adapter->db);
3293         if (adapter->roce_db.base)
3294                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3295 }
3296
3297 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3298 {
3299         struct pci_dev *pdev = adapter->pdev;
3300         u8 __iomem *addr;
3301
3302         addr = pci_iomap(pdev, 2, 0);
3303         if (addr == NULL)
3304                 return -ENOMEM;
3305
3306         adapter->roce_db.base = addr;
3307         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3308         adapter->roce_db.size = 8192;
3309         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3310         return 0;
3311 }
3312
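/* BAR layout differs by chip: Lancer maps its doorbells from BAR 0 (plus a
 * separate RoCE doorbell BAR 2 on SLI_INTF_TYPE_3), while BE2/BE3 map CSR
 * space from BAR 2 on the PF and take the doorbell BAR from BAR 4, except
 * on GEN3 VFs where it is BAR 0, as encoded in db_reg below.
 */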
3313 static int be_map_pci_bars(struct be_adapter *adapter)
3314 {
3315         u8 __iomem *addr;
3316         int db_reg;
3317
3318         if (lancer_chip(adapter)) {
3319                 if (be_type_2_3(adapter)) {
3320                         addr = ioremap_nocache(
3321                                         pci_resource_start(adapter->pdev, 0),
3322                                         pci_resource_len(adapter->pdev, 0));
3323                         if (addr == NULL)
3324                                 return -ENOMEM;
3325                         adapter->db = addr;
3326                 }
3327                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3328                         if (lancer_roce_map_pci_bars(adapter))
3329                                 goto pci_map_err;
3330                 }
3331                 return 0;
3332         }
3333
3334         if (be_physfn(adapter)) {
3335                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3336                                 pci_resource_len(adapter->pdev, 2));
3337                 if (addr == NULL)
3338                         return -ENOMEM;
3339                 adapter->csr = addr;
3340         }
3341
3342         if (adapter->generation == BE_GEN2) {
3343                 db_reg = 4;
3344         } else {
3345                 if (be_physfn(adapter))
3346                         db_reg = 4;
3347                 else
3348                         db_reg = 0;
3349         }
3350         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3351                                 pci_resource_len(adapter->pdev, db_reg));
3352         if (addr == NULL)
3353                 goto pci_map_err;
3354         adapter->db = addr;
3355         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3356                 adapter->roce_db.size = 4096;
3357                 adapter->roce_db.io_addr =
3358                                 pci_resource_start(adapter->pdev, db_reg);
3359                 adapter->roce_db.total_size =
3360                                 pci_resource_len(adapter->pdev, db_reg);
3361         }
3362         return 0;
3363 pci_map_err:
3364         be_unmap_pci_bars(adapter);
3365         return -ENOMEM;
3366 }
3367
3368 static void be_ctrl_cleanup(struct be_adapter *adapter)
3369 {
3370         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3371
3372         be_unmap_pci_bars(adapter);
3373
3374         if (mem->va)
3375                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3376                                   mem->dma);
3377
3378         mem = &adapter->rx_filter;
3379         if (mem->va)
3380                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3381                                   mem->dma);
3382 }
3383
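/* The mailbox used for bootstrap commands must be 16-byte aligned, so 16
 * extra bytes are allocated and both the virtual and DMA addresses are
 * rounded up with PTR_ALIGN; mbox_mem_alloced keeps the original pointers
 * for freeing.
 */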
3384 static int be_ctrl_init(struct be_adapter *adapter)
3385 {
3386         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3387         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3388         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3389         int status;
3390
3391         status = be_map_pci_bars(adapter);
3392         if (status)
3393                 goto done;
3394
3395         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3396         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3397                                                 mbox_mem_alloc->size,
3398                                                 &mbox_mem_alloc->dma,
3399                                                 GFP_KERNEL);
3400         if (!mbox_mem_alloc->va) {
3401                 status = -ENOMEM;
3402                 goto unmap_pci_bars;
3403         }
3404         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3405         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3406         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3407         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3408
3409         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3410         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3411                                         &rx_filter->dma, GFP_KERNEL);
3412         if (rx_filter->va == NULL) {
3413                 status = -ENOMEM;
3414                 goto free_mbox;
3415         }
3416         memset(rx_filter->va, 0, rx_filter->size);
3417
3418         mutex_init(&adapter->mbox_lock);
3419         spin_lock_init(&adapter->mcc_lock);
3420         spin_lock_init(&adapter->mcc_cq_lock);
3421
3422         init_completion(&adapter->flash_compl);
3423         pci_save_state(adapter->pdev);
3424         return 0;
3425
3426 free_mbox:
3427         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3428                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3429
3430 unmap_pci_bars:
3431         be_unmap_pci_bars(adapter);
3432
3433 done:
3434         return status;
3435 }
3436
3437 static void be_stats_cleanup(struct be_adapter *adapter)
3438 {
3439         struct be_dma_mem *cmd = &adapter->stats_cmd;
3440
3441         if (cmd->va)
3442                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3443                                   cmd->va, cmd->dma);
3444 }
3445
3446 static int be_stats_init(struct be_adapter *adapter)
3447 {
3448         struct be_dma_mem *cmd = &adapter->stats_cmd;
3449
3450         if (adapter->generation == BE_GEN2) {
3451                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3452         } else {
3453                 if (lancer_chip(adapter))
3454                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3455                 else
3456                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3457         }
3458         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3459                                      GFP_KERNEL);
3460         if (cmd->va == NULL)
3461                 return -1;
3462         memset(cmd->va, 0, cmd->size);
3463         return 0;
3464 }
3465
3466 static void __devexit be_remove(struct pci_dev *pdev)
3467 {
3468         struct be_adapter *adapter = pci_get_drvdata(pdev);
3469
3470         if (!adapter)
3471                 return;
3472
3473         be_roce_dev_remove(adapter);
3474
3475         unregister_netdev(adapter->netdev);
3476
3477         be_clear(adapter);
3478
3479         be_stats_cleanup(adapter);
3480
3481         be_ctrl_cleanup(adapter);
3482
3483         pci_set_drvdata(pdev, NULL);
3484         pci_release_regions(pdev);
3485         pci_disable_device(pdev);
3486
3487         free_netdev(adapter->netdev);
3488 }
3489
3490 bool be_is_wol_supported(struct be_adapter *adapter)
3491 {
3492         return (adapter->wol_cap & BE_WOL_CAP) &&
3493                 !be_is_wol_excluded(adapter);
3494 }
3495
3496 u32 be_get_fw_log_level(struct be_adapter *adapter)
3497 {
3498         struct be_dma_mem extfat_cmd;
3499         struct be_fat_conf_params *cfgs;
3500         int status;
3501         u32 level = 0;
3502         int j;
3503
3504         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3505         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3506         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3507                                              &extfat_cmd.dma);
3508
3509         if (!extfat_cmd.va) {
3510                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3511                         __func__);
3512                 goto err;
3513         }
3514
3515         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3516         if (!status) {
3517                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3518                                                 sizeof(struct be_cmd_resp_hdr));
3519                 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3520                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3521                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3522                 }
3523         }
3524         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3525                             extfat_cmd.dma);
3526 err:
3527         return level;
3528 }

3529 static int be_get_initial_config(struct be_adapter *adapter)
3530 {
3531         int status;
3532         u32 level;
3533
3534         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3535                         &adapter->function_mode, &adapter->function_caps);
3536         if (status)
3537                 return status;
3538
3539         if (adapter->function_mode & FLEX10_MODE)
3540                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3541         else
3542                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3543
3544         if (be_physfn(adapter))
3545                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3546         else
3547                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3548
3549         /* primary mac needs 1 pmac entry */
3550         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3551                                   sizeof(u32), GFP_KERNEL);
3552         if (!adapter->pmac_id)
3553                 return -ENOMEM;
3554
3555         status = be_cmd_get_cntl_attributes(adapter);
3556         if (status)
3557                 return status;
3558
3559         status = be_cmd_get_acpi_wol_cap(adapter);
3560         if (status) {
3561                 /* in case of a failure to get wol capabilities,
3562                  * check the exclusion list to determine WOL capability */
3563                 if (!be_is_wol_excluded(adapter))
3564                         adapter->wol_cap |= BE_WOL_CAP;
3565         }
3566
3567         if (be_is_wol_supported(adapter))
3568                 adapter->wol = true;
3569
3570         /* Must be a power of 2 or else MODULO will BUG_ON */
3571         adapter->be_get_temp_freq = 64;
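        /* be_worker reschedules itself every 1000ms, so a frequency of 64
         * samples the die temperature roughly once a minute.
         */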
3572
3573         level = be_get_fw_log_level(adapter);
3574         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3575
3576         return 0;
3577 }
3578
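/* Derive the adapter generation from the PCI device ID; for the newer IDs
 * the SLI_INTF register is also read to validate the interface and record
 * the SLI family, and the FT bit is checked to detect a virtual function.
 */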
3579 static int be_dev_type_check(struct be_adapter *adapter)
3580 {
3581         struct pci_dev *pdev = adapter->pdev;
3582         u32 sli_intf = 0;
3583
3584         switch (pdev->device) {
3585         case BE_DEVICE_ID1:
3586         case OC_DEVICE_ID1:
3587                 adapter->generation = BE_GEN2;
3588                 break;
3589         case BE_DEVICE_ID2:
3590         case OC_DEVICE_ID2:
3591                 adapter->generation = BE_GEN3;
3592                 break;
3593         case OC_DEVICE_ID3:
3594         case OC_DEVICE_ID4:
3595                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3596                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3597                                                 SLI_INTF_IF_TYPE_SHIFT;
3600                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3601                         !be_type_2_3(adapter)) {
3602                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3603                         return -EINVAL;
3604                 }
3605                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3606                                          SLI_INTF_FAMILY_SHIFT);
3607                 adapter->generation = BE_GEN3;
3608                 break;
3609         case OC_DEVICE_ID5:
3610                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3611                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3612                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3613                         return -EINVAL;
3614                 }
3615                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3616                                          SLI_INTF_FAMILY_SHIFT);
3617                 adapter->generation = BE_GEN3;
3618                 break;
3619         default:
3620                 adapter->generation = 0;
3621         }
3622
3623         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3624         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3625         return 0;
3626 }
3627
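/* Poll the SLIPORT status register once a second for up to 30 seconds,
 * waiting for the firmware to report ready.
 */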
3628 static int lancer_wait_ready(struct be_adapter *adapter)
3629 {
3630 #define SLIPORT_READY_TIMEOUT 30
3631         u32 sliport_status;
3632         int status = 0, i;
3633
3634         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3635                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3636                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3637                         break;
3638
3639                 msleep(1000);
3640         }
3641
3642         if (i == SLIPORT_READY_TIMEOUT)
3643                 status = -1;
3644
3645         return status;
3646 }
3647
3648 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3649 {
3650         int status;
3651         u32 sliport_status, err, reset_needed;
3652         status = lancer_wait_ready(adapter);
3653         if (!status) {
3654                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3655                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3656                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3657                 if (err && reset_needed) {
3658                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3659                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3660
3661                         /* check adapter has corrected the error */
3662                         status = lancer_wait_ready(adapter);
3663                         sliport_status = ioread32(adapter->db +
3664                                                         SLIPORT_STATUS_OFFSET);
3665                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3666                                                 SLIPORT_STATUS_RN_MASK);
3667                         if (status || sliport_status)
3668                                 status = -1;
3669                 } else if (err || reset_needed) {
3670                         status = -1;
3671                 }
3672         }
3673         return status;
3674 }
3675
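/* If the Lancer SLIPORT reports an error, attempt an in-place recovery:
 * reset the function back to the ready state, tear down and rebuild the
 * adapter, and reopen the interface if it was running.
 */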
3676 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3677 {
3678         int status;
3679         u32 sliport_status;
3680
3681         if (adapter->eeh_err || adapter->ue_detected)
3682                 return;
3683
3684         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3685
3686         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3687                 dev_err(&adapter->pdev->dev,
3688                                 "Adapter in error state."
3689                                 "Trying to recover.\n");
3690
3691                 status = lancer_test_and_set_rdy_state(adapter);
3692                 if (status)
3693                         goto err;
3694
3695                 netif_device_detach(adapter->netdev);
3696
3697                 if (netif_running(adapter->netdev))
3698                         be_close(adapter->netdev);
3699
3700                 be_clear(adapter);
3701
3702                 adapter->fw_timeout = false;
3703
3704                 status = be_setup(adapter);
3705                 if (status)
3706                         goto err;
3707
3708                 if (netif_running(adapter->netdev)) {
3709                         status = be_open(adapter->netdev);
3710                         if (status)
3711                                 goto err;
3712                 }
3713
3714                 netif_device_attach(adapter->netdev);
3715
3716                 dev_info(&adapter->pdev->dev,
3717                                 "Adapter error recovery succeeded\n");
3718         }
3719         return;
3720 err:
3721         dev_err(&adapter->pdev->dev,
3722                         "Adapter error recovery failed\n");
3723 }
3724
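/* Periodic housekeeping, rescheduled every second: recover Lancer function
 * errors, detect unrecoverable errors, refresh HW stats, sample the die
 * temperature, replenish RX queues that ran dry and adapt EQ interrupt
 * delays to the observed load.
 */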
3725 static void be_worker(struct work_struct *work)
3726 {
3727         struct be_adapter *adapter =
3728                 container_of(work, struct be_adapter, work.work);
3729         struct be_rx_obj *rxo;
3730         struct be_eq_obj *eqo;
3731         int i;
3732
3733         if (lancer_chip(adapter))
3734                 lancer_test_and_recover_fn_err(adapter);
3735
3736         be_detect_dump_ue(adapter);
3737
3738         /* when interrupts are not yet enabled, just reap any pending
3739          * mcc completions */
3740         if (!netif_running(adapter->netdev)) {
3741                 be_process_mcc(adapter);
3742                 goto reschedule;
3743         }
3744
3745         if (!adapter->stats_cmd_sent) {
3746                 if (lancer_chip(adapter))
3747                         lancer_cmd_get_pport_stats(adapter,
3748                                                 &adapter->stats_cmd);
3749                 else
3750                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3751         }
3752
3753         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3754                 be_cmd_get_die_temperature(adapter);
3755
3756         for_all_rx_queues(adapter, rxo, i) {
3757                 if (rxo->rx_post_starved) {
3758                         rxo->rx_post_starved = false;
3759                         be_post_rx_frags(rxo, GFP_KERNEL);
3760                 }
3761         }
3762
3763         for_all_evt_queues(adapter, eqo, i)
3764                 be_eqd_update(adapter, eqo);
3765
3766 reschedule:
3767         adapter->work_counter++;
3768         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3769 }
3770
3771 static bool be_reset_required(struct be_adapter *adapter)
3772 {
3773         return be_find_vfs(adapter, ENABLED) <= 0;
3774 }
3775
3776 static int __devinit be_probe(struct pci_dev *pdev,
3777                         const struct pci_device_id *pdev_id)
3778 {
3779         int status = 0;
3780         struct be_adapter *adapter;
3781         struct net_device *netdev;
3782
3783         status = pci_enable_device(pdev);
3784         if (status)
3785                 goto do_none;
3786
3787         status = pci_request_regions(pdev, DRV_NAME);
3788         if (status)
3789                 goto disable_dev;
3790         pci_set_master(pdev);
3791
3792         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3793         if (netdev == NULL) {
3794                 status = -ENOMEM;
3795                 goto rel_reg;
3796         }
3797         adapter = netdev_priv(netdev);
3798         adapter->pdev = pdev;
3799         pci_set_drvdata(pdev, adapter);
3800
3801         status = be_dev_type_check(adapter);
3802         if (status)
3803                 goto free_netdev;
3804
3805         adapter->netdev = netdev;
3806         SET_NETDEV_DEV(netdev, &pdev->dev);
3807
3808         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3809         if (!status) {
3810                 netdev->features |= NETIF_F_HIGHDMA;
3811         } else {
3812                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3813                 if (status) {
3814                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3815                         goto free_netdev;
3816                 }
3817         }
3818
3819         status = be_ctrl_init(adapter);
3820         if (status)
3821                 goto free_netdev;
3822
3823         if (lancer_chip(adapter)) {
3824                 status = lancer_wait_ready(adapter);
3825                 if (!status) {
3826                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3827                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3828                         status = lancer_test_and_set_rdy_state(adapter);
3829                 }
3830                 if (status) {
3831                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3832                         goto ctrl_clean;
3833                 }
3834         }
3835
3836         /* sync up with fw's ready state */
3837         if (be_physfn(adapter)) {
3838                 status = be_cmd_POST(adapter);
3839                 if (status)
3840                         goto ctrl_clean;
3841         }
3842
3843         /* tell fw we're ready to fire cmds */
3844         status = be_cmd_fw_init(adapter);
3845         if (status)
3846                 goto ctrl_clean;
3847
3848         if (be_reset_required(adapter)) {
3849                 status = be_cmd_reset_function(adapter);
3850                 if (status)
3851                         goto ctrl_clean;
3852         }
3853
3854         /* The INTR bit may be set in the card when probed by a kdump kernel
3855          * after a crash.
3856          */
3857         if (!lancer_chip(adapter))
3858                 be_intr_set(adapter, false);
3859
3860         status = be_stats_init(adapter);
3861         if (status)
3862                 goto ctrl_clean;
3863
3864         status = be_get_initial_config(adapter);
3865         if (status)
3866                 goto stats_clean;
3867
3868         INIT_DELAYED_WORK(&adapter->work, be_worker);
3869         adapter->rx_fc = adapter->tx_fc = true;
3870
3871         status = be_setup(adapter);
3872         if (status)
3873                 goto msix_disable;
3874
3875         be_netdev_init(netdev);
3876         status = register_netdev(netdev);
3877         if (status != 0)
3878                 goto unsetup;
3879
3880         be_roce_dev_add(adapter);
3881
3882         dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3883                 adapter->port_num);
3884
3885         return 0;
3886
3887 unsetup:
3888         be_clear(adapter);
3889 msix_disable:
3890         be_msix_disable(adapter);
3891 stats_clean:
3892         be_stats_cleanup(adapter);
3893 ctrl_clean:
3894         be_ctrl_cleanup(adapter);
3895 free_netdev:
3896         free_netdev(netdev);
3897         pci_set_drvdata(pdev, NULL);
3898 rel_reg:
3899         pci_release_regions(pdev);
3900 disable_dev:
3901         pci_disable_device(pdev);
3902 do_none:
3903         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3904         return status;
3905 }
3906
3907 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3908 {
3909         struct be_adapter *adapter = pci_get_drvdata(pdev);
3910         struct net_device *netdev = adapter->netdev;
3911
3912         if (adapter->wol)
3913                 be_setup_wol(adapter, true);
3914
3915         netif_device_detach(netdev);
3916         if (netif_running(netdev)) {
3917                 rtnl_lock();
3918                 be_close(netdev);
3919                 rtnl_unlock();
3920         }
3921         be_clear(adapter);
3922
3923         pci_save_state(pdev);
3924         pci_disable_device(pdev);
3925         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3926         return 0;
3927 }
3928
3929 static int be_resume(struct pci_dev *pdev)
3930 {
3931         int status = 0;
3932         struct be_adapter *adapter = pci_get_drvdata(pdev);
3933         struct net_device *netdev = adapter->netdev;
3934
3935         netif_device_detach(netdev);
3936
3937         status = pci_enable_device(pdev);
3938         if (status)
3939                 return status;
3940
3941         pci_set_power_state(pdev, PCI_D0);
3942         pci_restore_state(pdev);
3943
3944         /* tell fw we're ready to fire cmds */
3945         status = be_cmd_fw_init(adapter);
3946         if (status)
3947                 return status;
3948
3949         be_setup(adapter);
3950         if (netif_running(netdev)) {
3951                 rtnl_lock();
3952                 be_open(netdev);
3953                 rtnl_unlock();
3954         }
3955         netif_device_attach(netdev);
3956
3957         if (adapter->wol)
3958                 be_setup_wol(adapter, false);
3959
3960         return 0;
3961 }
3962
3963 /*
3964  * An FLR will stop BE from DMAing any data.
3965  */
3966 static void be_shutdown(struct pci_dev *pdev)
3967 {
3968         struct be_adapter *adapter = pci_get_drvdata(pdev);
3969
3970         if (!adapter)
3971                 return;
3972
3973         cancel_delayed_work_sync(&adapter->work);
3974
3975         netif_device_detach(adapter->netdev);
3976
3977         if (adapter->wol)
3978                 be_setup_wol(adapter, true);
3979
3980         be_cmd_reset_function(adapter);
3981
3982         pci_disable_device(pdev);
3983 }
3984
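/* PCI error (EEH/AER) recovery: error_detected quiesces the device and,
 * unless the failure is permanent, waits out a possible firmware flash
 * dump before asking for a slot reset; slot_reset re-enables the device
 * and waits for POST; resume re-initializes the function and reopens the
 * interface.
 */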
3985 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3986                                 pci_channel_state_t state)
3987 {
3988         struct be_adapter *adapter = pci_get_drvdata(pdev);
3989         struct net_device *netdev = adapter->netdev;
3990
3991         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3992
3993         adapter->eeh_err = true;
3994
3995         netif_device_detach(netdev);
3996
3997         if (netif_running(netdev)) {
3998                 rtnl_lock();
3999                 be_close(netdev);
4000                 rtnl_unlock();
4001         }
4002         be_clear(adapter);
4003
4004         if (state == pci_channel_io_perm_failure)
4005                 return PCI_ERS_RESULT_DISCONNECT;
4006
4007         pci_disable_device(pdev);
4008
4009         /* The error could cause the FW to trigger a flash debug dump.
4010          * Resetting the card while flash dump is in progress
4011          * can cause it not to recover; wait for it to finish
4012          */
4013         ssleep(30);
4014         return PCI_ERS_RESULT_NEED_RESET;
4015 }
4016
4017 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4018 {
4019         struct be_adapter *adapter = pci_get_drvdata(pdev);
4020         int status;
4021
4022         dev_info(&adapter->pdev->dev, "EEH reset\n");
4023         adapter->eeh_err = false;
4024         adapter->ue_detected = false;
4025         adapter->fw_timeout = false;
4026
4027         status = pci_enable_device(pdev);
4028         if (status)
4029                 return PCI_ERS_RESULT_DISCONNECT;
4030
4031         pci_set_master(pdev);
4032         pci_set_power_state(pdev, PCI_D0);
4033         pci_restore_state(pdev);
4034
4035         /* Check if card is ok and fw is ready */
4036         status = be_cmd_POST(adapter);
4037         if (status)
4038                 return PCI_ERS_RESULT_DISCONNECT;
4039
4040         return PCI_ERS_RESULT_RECOVERED;
4041 }
4042
4043 static void be_eeh_resume(struct pci_dev *pdev)
4044 {
4045         int status = 0;
4046         struct be_adapter *adapter = pci_get_drvdata(pdev);
4047         struct net_device *netdev = adapter->netdev;
4048
4049         dev_info(&adapter->pdev->dev, "EEH resume\n");
4050
4051         pci_save_state(pdev);
4052
4053         /* tell fw we're ready to fire cmds */
4054         status = be_cmd_fw_init(adapter);
4055         if (status)
4056                 goto err;
4057
4058         status = be_setup(adapter);
4059         if (status)
4060                 goto err;
4061
4062         if (netif_running(netdev)) {
4063                 status = be_open(netdev);
4064                 if (status)
4065                         goto err;
4066         }
4067         netif_device_attach(netdev);
4068         return;
4069 err:
4070         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4071 }
4072
4073 static struct pci_error_handlers be_eeh_handlers = {
4074         .error_detected = be_eeh_err_detected,
4075         .slot_reset = be_eeh_reset,
4076         .resume = be_eeh_resume,
4077 };
4078
4079 static struct pci_driver be_driver = {
4080         .name = DRV_NAME,
4081         .id_table = be_dev_ids,
4082         .probe = be_probe,
4083         .remove = be_remove,
4084         .suspend = be_suspend,
4085         .resume = be_resume,
4086         .shutdown = be_shutdown,
4087         .err_handler = &be_eeh_handlers
4088 };
4089
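/* Module entry: validate rx_frag_size and register the PCI driver. Usage
 * sketch (assuming the module is loaded under its DRV_NAME, be2net):
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * Invalid rx_frag_size values fall back to 2048 with a warning.
 */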
4090 static int __init be_init_module(void)
4091 {
4092         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4093             rx_frag_size != 2048) {
4094                 pr_warn(DRV_NAME ": Module param rx_frag_size must be "
4095                         "2048/4096/8192. Using 2048\n");
4097                 rx_frag_size = 2048;
4098         }
4099
4100         return pci_register_driver(&be_driver);
4101 }
4102 module_init(be_init_module);
4103
4104 static void __exit be_exit_module(void)
4105 {
4106         pci_unregister_driver(&be_driver);
4107 }
4108 module_exit(be_exit_module);