/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

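/* Free the DMA-coherent ring memory backing a queue, if any was allocated */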
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

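/* Enable/disable host interrupt delivery via the membar interrupt-control
 * register; does nothing if the requested state is already in effect.
 */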
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        /* Don't touch the device once an EEH error has been detected */
        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

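/* Doorbell helpers: each writes a queue-id plus a count into the doorbell
 * BAR. The wmb() ensures the ring-entry updates are visible to the device
 * before the doorbell itself is rung.
 */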
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

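/* Change the interface MAC address. On a PF the old pmac is replaced with
 * the new one via FW cmds; on a VF the PF owns MAC programming, so only
 * the netdev copy is updated.
 */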
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

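/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec */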
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

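/* Fill one tx WRB with the bus address and length of a single fragment */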
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

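/* DMA-map the skb head and frags and fill one WRB per mapping; the hdr wrb
 * is filled last. Returns the number of bytes queued, or 0 if a mapping
 * failed, in which case all mappings done so far are undone.
 */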
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

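/* Main transmit path: build the WRBs for this skb, stop the queue if it
 * cannot hold another max-sized request, then ring the tx doorbell.
 */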
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

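/* Sync the promiscuous/multicast filtering state with the hardware; falls
 * back to multicast-promiscuous mode when more addresses are configured
 * than the hardware filter can hold.
 */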
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm, tcpf, udpf;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
        udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);

        /* L4 checksum is not reliable for non-TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (tcpf || udpf) && l4_cksm && (ipcksm || ipv6);
}

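/* Return the page_info of a completed rx frag; the backing page is
 * DMA-unmapped when its last frag is consumed.
 */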
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

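/* Return the next valid rx completion, or NULL if none is pending. The
 * rmb() prevents reading the completion body before its valid bit.
 */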
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

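/* Unmap and free the skb whose wrbs run from the current txq tail up to
 * last_index, and return the consumed wrbs to the tx queue.
 */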
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

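/* Return the next posted event queue entry, or NULL if none is pending */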
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

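/* Discard pending rx completions, then release any posted rx buffers that
 * the hardware never filled.
 */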
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

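/* Drain tx completions at teardown; any posted wrbs whose completions
 * never arrive within 200ms are reclaimed forcibly.
 */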
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

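/* Create the tx EQ, tx compl queue and tx queue, unwinding on any failure */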
1483 static int be_tx_queues_create(struct be_adapter *adapter)
1484 {
1485         struct be_queue_info *eq, *q, *cq;
1486
1487         adapter->tx_eq.max_eqd = 0;
1488         adapter->tx_eq.min_eqd = 0;
1489         adapter->tx_eq.cur_eqd = 96;
1490         adapter->tx_eq.enable_aic = false;
1491         /* Alloc Tx Event queue */
1492         eq = &adapter->tx_eq.q;
1493         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1494                 return -1;
1495
1496         /* Ask BE to create Tx Event queue */
1497         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1498                 goto tx_eq_free;
1499
1500         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1501
1503         /* Alloc TX eth compl queue */
1504         cq = &adapter->tx_obj.cq;
1505         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1506                         sizeof(struct be_eth_tx_compl)))
1507                 goto tx_eq_destroy;
1508
1509         /* Ask BE to create Tx eth compl queue */
1510         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1511                 goto tx_cq_free;
1512
1513         /* Alloc TX eth queue */
1514         q = &adapter->tx_obj.q;
1515         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1516                 goto tx_cq_destroy;
1517
1518         /* Ask BE to create Tx eth queue */
1519         if (be_cmd_txq_create(adapter, q, cq))
1520                 goto tx_q_free;
1521         return 0;
1522
1523 tx_q_free:
1524         be_queue_free(adapter, q);
1525 tx_cq_destroy:
1526         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1527 tx_cq_free:
1528         be_queue_free(adapter, cq);
1529 tx_eq_destroy:
1530         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1531 tx_eq_free:
1532         be_queue_free(adapter, eq);
1533         return -1;
1534 }
1535
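/* For each RX ring: destroy the RX queue in fw (draining it after a short
 * grace period), then its CQ, then its EQ.
 */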
1536 static void be_rx_queues_destroy(struct be_adapter *adapter)
1537 {
1538         struct be_queue_info *q;
1539         struct be_rx_obj *rxo;
1540         int i;
1541
1542         for_all_rx_queues(adapter, rxo, i) {
1543                 q = &rxo->q;
1544                 if (q->created) {
1545                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1546                         /* After the rxq is invalidated, wait for a grace time
1547                          * of 1ms for all dma to end and the flush compl to
1548                          * arrive
1549                          */
1550                         mdelay(1);
1551                         be_rx_q_clean(adapter, rxo);
1552                 }
1553                 be_queue_free(adapter, q);
1554
1555                 q = &rxo->cq;
1556                 if (q->created)
1557                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1558                 be_queue_free(adapter, q);
1559
1560                 /* Clear any residual events */
1561                 q = &rxo->rx_eq.q;
1562                 if (q->created) {
1563                         be_eq_clean(adapter, &rxo->rx_eq);
1564                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1565                 }
1566                 be_queue_free(adapter, q);
1567         }
1568 }
1569
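/* Create an EQ, CQ and RX queue per ring. Ring 0 is the default (non-RSS)
 * queue; the remaining rings are programmed into the RSS indirection table.
 */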
1570 static int be_rx_queues_create(struct be_adapter *adapter)
1571 {
1572         struct be_queue_info *eq, *q, *cq;
1573         struct be_rx_obj *rxo;
1574         int rc, i;
1575
1576         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577         for_all_rx_queues(adapter, rxo, i) {
1578                 rxo->adapter = adapter;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640         if (!eqe->evt)
1641                 return false;
1642         else
1643                 return true;
1644 }
1645
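/* Legacy INTx handler. Lancer has no CEV ISR register, so each EQ is peeked
 * for pending events; on BEx the ISR register indicates which EQs fired.
 */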
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
1650         int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
1669                 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
1673                         if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
1700 static inline bool do_gro(struct be_rx_obj *rxo,
1701                         struct be_eth_rx_compl *rxcp, u8 err)
1702 {
1703         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1704
1705         if (err)
1706                 rxo->stats.rxcp_err++;
1707
1708         return tcp_frame && !err;
1709 }
1710
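/* NAPI poll for one RX ring: process up to budget completions (flush
 * completions carry no frags and are skipped), refill the RX queue when it
 * runs low, and re-arm the CQ once all work is consumed.
 */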
1711 static int be_poll_rx(struct napi_struct *napi, int budget)
1712 {
1713         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1714         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1715         struct be_adapter *adapter = rxo->adapter;
1716         struct be_queue_info *rx_cq = &rxo->cq;
1717         struct be_eth_rx_compl *rxcp;
1718         u32 work_done;
1719         u16 num_rcvd;
1720         u8 err;
1721
1722         rxo->stats.rx_polls++;
1723         for (work_done = 0; work_done < budget; work_done++) {
1724                 rxcp = be_rx_compl_get(rxo);
1725                 if (!rxcp)
1726                         break;
1727
1728                 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1729                 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1730                                                                 rxcp);
1731                 /* Ignore flush completions */
1732                 if (num_rcvd) {
1733                         if (do_gro(rxo, rxcp, err))
1734                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1735                         else
1736                                 be_rx_compl_process(adapter, rxo, rxcp);
1737                 }
1738
1739                 be_rx_compl_reset(rxcp);
1740         }
1741
1742         /* Refill the queue */
1743         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1744                 be_post_rx_frags(rxo, GFP_ATOMIC);
1745
1746         /* All consumed */
1747         if (work_done < budget) {
1748                 napi_complete(napi);
1749                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1750         } else {
1751                 /* More to be consumed; continue with interrupts disabled */
1752                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1753         }
1754         return work_done;
1755 }
1756
1757 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1758  * For TX/MCC we don't honour budget; consume everything
1759  */
1760 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1761 {
1762         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1763         struct be_adapter *adapter =
1764                 container_of(tx_eq, struct be_adapter, tx_eq);
1765         struct be_queue_info *txq = &adapter->tx_obj.q;
1766         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1767         struct be_eth_tx_compl *txcp;
1768         int tx_compl = 0, mcc_compl, status = 0;
1769         u16 end_idx;
1770
1771         while ((txcp = be_tx_compl_get(tx_cq))) {
1772                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1773                                 wrb_index, txcp);
1774                 be_tx_compl_process(adapter, end_idx);
1775                 tx_compl++;
1776         }
1777
1778         mcc_compl = be_process_mcc(adapter, &status);
1779
1780         napi_complete(napi);
1781
1782         if (mcc_compl) {
1783                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1784                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1785         }
1786
1787         if (tx_compl) {
1788                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1789
1790                 /* As Tx wrbs have been freed up, wake up netdev queue if
1791                  * it was stopped due to lack of tx wrbs.
1792                  */
1793                 if (netif_queue_stopped(adapter->netdev) &&
1794                         atomic_read(&txq->used) < txq->len / 2) {
1795                         netif_wake_queue(adapter->netdev);
1796                 }
1797
1798                 tx_stats(adapter)->be_tx_events++;
1799                 tx_stats(adapter)->be_tx_compl += tx_compl;
1800         }
1801
1802         return 1;
1803 }
1804
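/* Read the unrecoverable-error (UE) status registers and log every unmasked
 * error bit; any set bit flags the adapter as being in an unrecoverable
 * error state.
 */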
1805 void be_detect_dump_ue(struct be_adapter *adapter)
1806 {
1807         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1808         u32 i;
1809
1810         pci_read_config_dword(adapter->pdev,
1811                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1812         pci_read_config_dword(adapter->pdev,
1813                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1814         pci_read_config_dword(adapter->pdev,
1815                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1816         pci_read_config_dword(adapter->pdev,
1817                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1818
1819         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1820         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1821
1822         if (ue_status_lo || ue_status_hi) {
1823                 adapter->ue_detected = true;
1824                 adapter->eeh_err = true;
1825                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1826         }
1827
1828         if (ue_status_lo) {
1829                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1830                         if (ue_status_lo & 1)
1831                                 dev_err(&adapter->pdev->dev,
1832                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1833                 }
1834         }
1835         if (ue_status_hi) {
1836                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1837                         if (ue_status_hi & 1)
1838                                 dev_err(&adapter->pdev->dev,
1839                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1840                 }
1841         }
1843 }
1844
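/* Periodic worker (runs every second). While the interface is down it only
 * reaps MCC completions; otherwise it also refreshes stats, updates rx/tx
 * rates and EQDs, and replenishes starved RX queues.
 */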
1845 static void be_worker(struct work_struct *work)
1846 {
1847         struct be_adapter *adapter =
1848                 container_of(work, struct be_adapter, work.work);
1849         struct be_rx_obj *rxo;
1850         int i;
1851
1852         /* When interrupts are not yet enabled, just reap any pending
1853          * mcc completions */
1854         if (!netif_running(adapter->netdev)) {
1855                 int mcc_compl, status = 0;
1856
1857                 mcc_compl = be_process_mcc(adapter, &status);
1858
1859                 if (mcc_compl) {
1860                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1861                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1862                 }
1863
1864                 if (!adapter->ue_detected && !lancer_chip(adapter))
1865                         be_detect_dump_ue(adapter);
1866
1867                 goto reschedule;
1868         }
1869
1870         if (!adapter->stats_cmd_sent)
1871                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1872
1873         be_tx_rate_update(adapter);
1874
1875         for_all_rx_queues(adapter, rxo, i) {
1876                 be_rx_rate_update(rxo);
1877                 be_rx_eqd_update(adapter, rxo);
1878
1879                 if (rxo->rx_post_starved) {
1880                         rxo->rx_post_starved = false;
1881                         be_post_rx_frags(rxo, GFP_KERNEL);
1882                 }
1883         }
1884         if (!adapter->ue_detected && !lancer_chip(adapter))
1885                 be_detect_dump_ue(adapter);
1886
1887 reschedule:
1888         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1889 }
1890
1891 static void be_msix_disable(struct be_adapter *adapter)
1892 {
1893         if (adapter->msix_enabled) {
1894                 pci_disable_msix(adapter->pdev);
1895                 adapter->msix_enabled = false;
1896         }
1897 }
1898
1899 static int be_num_rxqs_get(struct be_adapter *adapter)
1900 {
1901         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1902                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1903                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1904         } else {
1905                 dev_warn(&adapter->pdev->dev,
1906                         "No support for multiple RX queues\n");
1907                 return 1;
1908         }
1909 }
1910
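/* Request one MSI-X vector per RX queue plus one for TX/MCC; if only a
 * smaller set is available, retry with fewer vectors and shrink num_rx_qs.
 */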
1911 static void be_msix_enable(struct be_adapter *adapter)
1912 {
1913 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1914         int i, status;
1915
1916         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1917
1918         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1919                 adapter->msix_entries[i].entry = i;
1920
1921         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1922                         adapter->num_rx_qs + 1);
1923         if (status == 0) {
1924                 goto done;
1925         } else if (status >= BE_MIN_MSIX_VECTORS) {
1926                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1927                                 status) == 0) {
1928                         adapter->num_rx_qs = status - 1;
1929                         dev_warn(&adapter->pdev->dev,
1930                                 "Could allocate only %d MSI-X vectors. "
1931                                 "Using %d RX queues\n", status, adapter->num_rx_qs);
1932                         goto done;
1933                 }
1934         }
1935         return;
1936 done:
1937         adapter->msix_enabled = true;
1938 }
1939
1940 static void be_sriov_enable(struct be_adapter *adapter)
1941 {
1942         be_check_sriov_fn_type(adapter);
1943 #ifdef CONFIG_PCI_IOV
1944         if (be_physfn(adapter) && num_vfs) {
1945                 int status;
1946
1947                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1948                 adapter->sriov_enabled = status ? false : true;
1949         }
1950 #endif
1951 }
1952
1953 static void be_sriov_disable(struct be_adapter *adapter)
1954 {
1955 #ifdef CONFIG_PCI_IOV
1956         if (adapter->sriov_enabled) {
1957                 pci_disable_sriov(adapter->pdev);
1958                 adapter->sriov_enabled = false;
1959         }
1960 #endif
1961 }
1962
1963 static inline int be_msix_vec_get(struct be_adapter *adapter,
1964                                         struct be_eq_obj *eq_obj)
1965 {
1966         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1967 }
1968
1969 static int be_request_irq(struct be_adapter *adapter,
1970                 struct be_eq_obj *eq_obj,
1971                 void *handler, char *desc, void *context)
1972 {
1973         struct net_device *netdev = adapter->netdev;
1974         int vec;
1975
1976         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1977         vec = be_msix_vec_get(adapter, eq_obj);
1978         return request_irq(vec, handler, 0, eq_obj->desc, context);
1979 }
1980
1981 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1982                         void *context)
1983 {
1984         int vec = be_msix_vec_get(adapter, eq_obj);
1985         free_irq(vec, context);
1986 }
1987
1988 static int be_msix_register(struct be_adapter *adapter)
1989 {
1990         struct be_rx_obj *rxo;
1991         int status, i;
1992         char qname[10];
1993
1994         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1995                                 adapter);
1996         if (status)
1997                 goto err;
1998
1999         for_all_rx_queues(adapter, rxo, i) {
2000                 sprintf(qname, "rxq%d", i);
2001                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2002                                 qname, rxo);
2003                 if (status)
2004                         goto err_msix;
2005         }
2006
2007         return 0;
2008
2009 err_msix:
2010         be_free_irq(adapter, &adapter->tx_eq, adapter);
2011
2012         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2013                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2014
2015 err:
2016         dev_warn(&adapter->pdev->dev,
2017                 "MSIX Request IRQ failed - err %d\n", status);
2018         pci_disable_msix(adapter->pdev);
2019         adapter->msix_enabled = false;
2020         return status;
2021 }
2022
2023 static int be_irq_register(struct be_adapter *adapter)
2024 {
2025         struct net_device *netdev = adapter->netdev;
2026         int status;
2027
2028         if (adapter->msix_enabled) {
2029                 status = be_msix_register(adapter);
2030                 if (status == 0)
2031                         goto done;
2032                 /* INTx is not supported for VF */
2033                 if (!be_physfn(adapter))
2034                         return status;
2035         }
2036
2037         /* INTx */
2038         netdev->irq = adapter->pdev->irq;
2039         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2040                         adapter);
2041         if (status) {
2042                 dev_err(&adapter->pdev->dev,
2043                         "INTx request IRQ failed - err %d\n", status);
2044                 return status;
2045         }
2046 done:
2047         adapter->isr_registered = true;
2048         return 0;
2049 }
2050
2051 static void be_irq_unregister(struct be_adapter *adapter)
2052 {
2053         struct net_device *netdev = adapter->netdev;
2054         struct be_rx_obj *rxo;
2055         int i;
2056
2057         if (!adapter->isr_registered)
2058                 return;
2059
2060         /* INTx */
2061         if (!adapter->msix_enabled) {
2062                 free_irq(netdev->irq, adapter);
2063                 goto done;
2064         }
2065
2066         /* MSIx */
2067         be_free_irq(adapter, &adapter->tx_eq, adapter);
2068
2069         for_all_rx_queues(adapter, rxo, i)
2070                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2071
2072 done:
2073         adapter->isr_registered = false;
2074 }
2075
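/* Quiesce the interface: disable interrupts and NAPI, unhook the IRQs and
 * wait for outstanding TX completions so every TX skb is freed.
 */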
2076 static int be_close(struct net_device *netdev)
2077 {
2078         struct be_adapter *adapter = netdev_priv(netdev);
2079         struct be_rx_obj *rxo;
2080         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2081         int vec, i;
2082
2083         be_async_mcc_disable(adapter);
2084
2085         netif_carrier_off(netdev);
2086         adapter->link_up = false;
2087
2088         if (!lancer_chip(adapter))
2089                 be_intr_set(adapter, false);
2090
2091         for_all_rx_queues(adapter, rxo, i)
2092                 napi_disable(&rxo->rx_eq.napi);
2093
2094         napi_disable(&tx_eq->napi);
2095
2096         if (lancer_chip(adapter)) {
2097                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2098                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2099                 for_all_rx_queues(adapter, rxo, i)
2100                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2101         }
2102
2103         if (adapter->msix_enabled) {
2104                 vec = be_msix_vec_get(adapter, tx_eq);
2105                 synchronize_irq(vec);
2106
2107                 for_all_rx_queues(adapter, rxo, i) {
2108                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2109                         synchronize_irq(vec);
2110                 }
2111         } else {
2112                 synchronize_irq(netdev->irq);
2113         }
2114         be_irq_unregister(adapter);
2115
2116         /* Wait for all pending tx completions to arrive so that
2117          * all tx skbs are freed.
2118          */
2119         be_tx_compl_clean(adapter);
2120
2121         return 0;
2122 }
2123
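/* Bring the interface up: post RX buffers, enable NAPI and interrupts, arm
 * the EQs/CQs (created unarmed) and query the initial link state.
 */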
2124 static int be_open(struct net_device *netdev)
2125 {
2126         struct be_adapter *adapter = netdev_priv(netdev);
2127         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2128         struct be_rx_obj *rxo;
2129         bool link_up;
2130         int status, i;
2131         u8 mac_speed;
2132         u16 link_speed;
2133
2134         for_all_rx_queues(adapter, rxo, i) {
2135                 be_post_rx_frags(rxo, GFP_KERNEL);
2136                 napi_enable(&rxo->rx_eq.napi);
2137         }
2138         napi_enable(&tx_eq->napi);
2139
2140         be_irq_register(adapter);
2141
2142         if (!lancer_chip(adapter))
2143                 be_intr_set(adapter, true);
2144
2145         /* The evt queues are created in unarmed state; arm them */
2146         for_all_rx_queues(adapter, rxo, i) {
2147                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2148                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2149         }
2150         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2151
2152         /* Now that interrupts are on, we can process async mcc */
2153         be_async_mcc_enable(adapter);
2154
2155         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2156                         &link_speed);
2157         if (status)
2158                 goto err;
2159         be_link_status_update(adapter, link_up);
2160
2161         if (be_physfn(adapter)) {
2162                 status = be_vid_config(adapter, false, 0);
2163                 if (status)
2164                         goto err;
2165
2166                 status = be_cmd_set_flow_control(adapter,
2167                                 adapter->tx_fc, adapter->rx_fc);
2168                 if (status)
2169                         goto err;
2170         }
2171
2172         return 0;
2173 err:
2174         be_close(adapter->netdev);
2175         return -EIO;
2176 }
2177
2178 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2179 {
2180         struct be_dma_mem cmd;
2181         int status = 0;
2182         u8 mac[ETH_ALEN];
2183
2184         memset(mac, 0, ETH_ALEN);
2185
2186         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2187         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2188                                     GFP_KERNEL);
2189         if (cmd.va == NULL)
2190                 return -1;
2191         memset(cmd.va, 0, cmd.size);
2192
2193         if (enable) {
2194                 status = pci_write_config_dword(adapter->pdev,
2195                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2196                 if (status) {
2197                         dev_err(&adapter->pdev->dev,
2198                                 "Could not enable Wake-on-lan\n");
2199                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2200                                           cmd.dma);
2201                         return status;
2202                 }
2203                 status = be_cmd_enable_magic_wol(adapter,
2204                                 adapter->netdev->dev_addr, &cmd);
2205                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2206                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2207         } else {
2208                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2209                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2210                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2211         }
2212
2213         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2214         return status;
2215 }
2216
2217 /*
2218  * Generate a seed MAC address from the PF MAC Address using jhash.
2219  * MAC addresses for VFs are assigned incrementally starting from the seed.
2220  * These addresses are programmed in the ASIC by the PF and the VF driver
2221  * queries for the MAC address during its probe.
2222  */
2223 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2224 {
2225         u32 vf = 0;
2226         int status = 0;
2227         u8 mac[ETH_ALEN];
2228
2229         be_vf_eth_addr_generate(adapter, mac);
2230
2231         for (vf = 0; vf < num_vfs; vf++) {
2232                 status = be_cmd_pmac_add(adapter, mac,
2233                                         adapter->vf_cfg[vf].vf_if_handle,
2234                                         &adapter->vf_cfg[vf].vf_pmac_id,
2235                                         vf + 1);
2236                 if (status)
2237                         dev_err(&adapter->pdev->dev,
2238                                 "MAC address add failed for VF %d\n", vf);
2239                 else
2240                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2241
2242                 mac[5] += 1;
2243         }
2244         return status;
2245 }
2246
2247 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2248 {
2249         u32 vf;
2250
2251         for (vf = 0; vf < num_vfs; vf++) {
2252                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2253                         be_cmd_pmac_del(adapter,
2254                                         adapter->vf_cfg[vf].vf_if_handle,
2255                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2256         }
2257 }
2258
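/* Create the PF interface (and one interface per VF when SR-IOV is on),
 * then the TX, RX and MCC queues.
 */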
2259 static int be_setup(struct be_adapter *adapter)
2260 {
2261         struct net_device *netdev = adapter->netdev;
2262         u32 cap_flags, en_flags, vf = 0;
2263         int status;
2264         u8 mac[ETH_ALEN];
2265
2266         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2267                                 BE_IF_FLAGS_BROADCAST |
2268                                 BE_IF_FLAGS_MULTICAST;
2269
2270         if (be_physfn(adapter)) {
2271                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2272                                 BE_IF_FLAGS_PROMISCUOUS |
2273                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2274                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2275
2276                 if (be_multi_rxq(adapter)) {
2277                         cap_flags |= BE_IF_FLAGS_RSS;
2278                         en_flags |= BE_IF_FLAGS_RSS;
2279                 }
2280         }
2281
2282         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2283                         netdev->dev_addr, false/* pmac_invalid */,
2284                         &adapter->if_handle, &adapter->pmac_id, 0);
2285         if (status != 0)
2286                 goto do_none;
2287
2288         if (be_physfn(adapter)) {
2289                 if (adapter->sriov_enabled) {
2290                         while (vf < num_vfs) {
2291                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2292                                                         BE_IF_FLAGS_BROADCAST;
2293                                 status = be_cmd_if_create(adapter, cap_flags,
2294                                         en_flags, mac, true,
2295                                         &adapter->vf_cfg[vf].vf_if_handle,
2296                                         NULL, vf+1);
2297                                 if (status) {
2298                                         dev_err(&adapter->pdev->dev,
2299                                         "Interface Create failed for VF %d\n",
2300                                         vf);
2301                                         goto if_destroy;
2302                                 }
2303                                 adapter->vf_cfg[vf].vf_pmac_id =
2304                                                         BE_INVALID_PMAC_ID;
2305                                 vf++;
2306                         }
2307                 }
2308         } else {
2309                 status = be_cmd_mac_addr_query(adapter, mac,
2310                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2311                 if (!status) {
2312                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2313                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2314                 }
2315         }
2316
2317         status = be_tx_queues_create(adapter);
2318         if (status != 0)
2319                 goto if_destroy;
2320
2321         status = be_rx_queues_create(adapter);
2322         if (status != 0)
2323                 goto tx_qs_destroy;
2324
2325         status = be_mcc_queues_create(adapter);
2326         if (status != 0)
2327                 goto rx_qs_destroy;
2328
2329         adapter->link_speed = -1;
2330
2331         return 0;
2332
2334 rx_qs_destroy:
2335         be_rx_queues_destroy(adapter);
2336 tx_qs_destroy:
2337         be_tx_queues_destroy(adapter);
2338 if_destroy:
2339         if (be_physfn(adapter) && adapter->sriov_enabled)
2340                 for (vf = 0; vf < num_vfs; vf++)
2341                         if (adapter->vf_cfg[vf].vf_if_handle)
2342                                 be_cmd_if_destroy(adapter,
2343                                         adapter->vf_cfg[vf].vf_if_handle,
2344                                         vf + 1);
2345         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2346 do_none:
2347         return status;
2348 }
2349
2350 static int be_clear(struct be_adapter *adapter)
2351 {
2352         int vf;
2353
2354         if (be_physfn(adapter) && adapter->sriov_enabled)
2355                 be_vf_eth_addr_rem(adapter);
2356
2357         be_mcc_queues_destroy(adapter);
2358         be_rx_queues_destroy(adapter);
2359         be_tx_queues_destroy(adapter);
2360
2361         if (be_physfn(adapter) && adapter->sriov_enabled)
2362                 for (vf = 0; vf < num_vfs; vf++)
2363                         if (adapter->vf_cfg[vf].vf_if_handle)
2364                                 be_cmd_if_destroy(adapter,
2365                                         adapter->vf_cfg[vf].vf_if_handle,
2366                                         vf + 1);
2367
2368         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2369
2370         /* tell fw we're done with firing cmds */
2371         be_cmd_fw_clean(adapter);
2372         return 0;
2373 }
2374
2376 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2377 static bool be_flash_redboot(struct be_adapter *adapter,
2378                         const u8 *p, u32 img_start, int image_size,
2379                         int hdr_size)
2380 {
2381         u32 crc_offset;
2382         u8 flashed_crc[4];
2383         int status;
2384
2385         crc_offset = hdr_size + img_start + image_size - 4;
2386
2387         p += crc_offset;
2388
2389         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2390                         (image_size - 4));
2391         if (status) {
2392                 dev_err(&adapter->pdev->dev,
2393                 "could not get crc from flash, not flashing redboot\n");
2394                 return false;
2395         }
2396
2397         /* update redboot only if crc does not match */
2398         if (!memcmp(flashed_crc, p, 4))
2399                 return false;
2400         else
2401                 return true;
2402 }
2403
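/* Walk the flash layout table for this chip generation and write each
 * firmware component to the flashrom in 32KB chunks.
 */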
2404 static int be_flash_data(struct be_adapter *adapter,
2405                         const struct firmware *fw,
2406                         struct be_dma_mem *flash_cmd, int num_of_images)
2407
2408 {
2409         int status = 0, i, filehdr_size = 0;
2410         u32 total_bytes = 0, flash_op;
2411         int num_bytes;
2412         const u8 *p = fw->data;
2413         struct be_cmd_write_flashrom *req = flash_cmd->va;
2414         const struct flash_comp *pflashcomp;
2415         int num_comp;
2416
2417         static const struct flash_comp gen3_flash_types[9] = {
2418                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2419                         FLASH_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2421                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2422                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2423                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2424                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2425                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2426                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2427                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2428                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2429                         FLASH_IMAGE_MAX_SIZE_g3},
2430                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2431                         FLASH_IMAGE_MAX_SIZE_g3},
2432                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2433                         FLASH_IMAGE_MAX_SIZE_g3},
2434                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2435                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2436         };
2437         static const struct flash_comp gen2_flash_types[8] = {
2438                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2439                         FLASH_IMAGE_MAX_SIZE_g2},
2440                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2441                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2442                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2443                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2444                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2445                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2446                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2447                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2448                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2449                         FLASH_IMAGE_MAX_SIZE_g2},
2450                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2451                         FLASH_IMAGE_MAX_SIZE_g2},
2452                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2453                         FLASH_IMAGE_MAX_SIZE_g2}
2454         };
2455
2456         if (adapter->generation == BE_GEN3) {
2457                 pflashcomp = gen3_flash_types;
2458                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2459                 num_comp = ARRAY_SIZE(gen3_flash_types);
2460         } else {
2461                 pflashcomp = gen2_flash_types;
2462                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2463                 num_comp = ARRAY_SIZE(gen2_flash_types);
2464         }
2465         for (i = 0; i < num_comp; i++) {
2466                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2467                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2468                         continue;
2469                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2470                         (!be_flash_redboot(adapter, fw->data,
2471                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2472                         (num_of_images * sizeof(struct image_hdr)))))
2473                         continue;
2474                 p = fw->data;
2475                 p += filehdr_size + pflashcomp[i].offset
2476                         + (num_of_images * sizeof(struct image_hdr));
2477                 if (p + pflashcomp[i].size > fw->data + fw->size)
2478                         return -1;
2479                 total_bytes = pflashcomp[i].size;
2480                 while (total_bytes) {
2481                         if (total_bytes > 32*1024)
2482                                 num_bytes = 32*1024;
2483                         else
2484                                 num_bytes = total_bytes;
2485                         total_bytes -= num_bytes;
2486
2487                         if (!total_bytes)
2488                                 flash_op = FLASHROM_OPER_FLASH;
2489                         else
2490                                 flash_op = FLASHROM_OPER_SAVE;
2491                         memcpy(req->params.data_buf, p, num_bytes);
2492                         p += num_bytes;
2493                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2494                                 pflashcomp[i].optype, flash_op, num_bytes);
2495                         if (status) {
2496                                 dev_err(&adapter->pdev->dev,
2497                                         "cmd to write to flash rom failed.\n");
2498                                 return -1;
2499                         }
2500                         yield();
2501                 }
2502         }
2503         return 0;
2504 }
2505
2506 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2507 {
2508         if (fhdr == NULL)
2509                 return 0;
2510         if (fhdr->build[0] == '3')
2511                 return BE_GEN3;
2512         else if (fhdr->build[0] == '2')
2513                 return BE_GEN2;
2514         else
2515                 return 0;
2516 }
2517
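/* Flash a user-supplied UFI file after checking that its generation matches
 * the adapter's.
 */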
2518 int be_load_fw(struct be_adapter *adapter, u8 *func)
2519 {
2520         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2521         const struct firmware *fw;
2522         struct flash_file_hdr_g2 *fhdr;
2523         struct flash_file_hdr_g3 *fhdr3;
2524         struct image_hdr *img_hdr_ptr = NULL;
2525         struct be_dma_mem flash_cmd;
2526         int status, i = 0, num_imgs = 0;
2527         const u8 *p;
2528
2529         if (!netif_running(adapter->netdev)) {
2530                 dev_err(&adapter->pdev->dev,
2531                         "Firmware load not allowed (interface is down)\n");
2532                 return -EPERM;
2533         }
2534
2535         strcpy(fw_file, func);
2536
2537         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2538         if (status)
2539                 goto fw_exit;
2540
2541         p = fw->data;
2542         fhdr = (struct flash_file_hdr_g2 *) p;
2543         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2544
2545         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2546         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2547                                           &flash_cmd.dma, GFP_KERNEL);
2548         if (!flash_cmd.va) {
2549                 status = -ENOMEM;
2550                 dev_err(&adapter->pdev->dev,
2551                         "Memory allocation failure while flashing\n");
2552                 goto fw_exit;
2553         }
2554
2555         if ((adapter->generation == BE_GEN3) &&
2556                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2557                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2558                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2559                 for (i = 0; i < num_imgs; i++) {
2560                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2561                                         (sizeof(struct flash_file_hdr_g3) +
2562                                          i * sizeof(struct image_hdr)));
2563                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2564                                 status = be_flash_data(adapter, fw, &flash_cmd,
2565                                                         num_imgs);
2566                 }
2567         } else if ((adapter->generation == BE_GEN2) &&
2568                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2569                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2570         } else {
2571                 dev_err(&adapter->pdev->dev,
2572                         "UFI and Interface are not compatible for flashing\n");
2573                 status = -1;
2574         }
2575
2576         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2577                           flash_cmd.dma);
2578         if (status) {
2579                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2580                 goto fw_exit;
2581         }
2582
2583         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2584
2585 fw_exit:
2586         release_firmware(fw);
2587         return status;
2588 }
2589
2590 static const struct net_device_ops be_netdev_ops = {
2591         .ndo_open               = be_open,
2592         .ndo_stop               = be_close,
2593         .ndo_start_xmit         = be_xmit,
2594         .ndo_set_rx_mode        = be_set_multicast_list,
2595         .ndo_set_mac_address    = be_mac_addr_set,
2596         .ndo_change_mtu         = be_change_mtu,
2597         .ndo_validate_addr      = eth_validate_addr,
2598         .ndo_vlan_rx_register   = be_vlan_register,
2599         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2600         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2601         .ndo_set_vf_mac         = be_set_vf_mac,
2602         .ndo_set_vf_vlan        = be_set_vf_vlan,
2603         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2604         .ndo_get_vf_config      = be_get_vf_config
2605 };
2606
2607 static void be_netdev_init(struct net_device *netdev)
2608 {
2609         struct be_adapter *adapter = netdev_priv(netdev);
2610         struct be_rx_obj *rxo;
2611         int i;
2612
2613         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2614                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2615                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2616                 NETIF_F_GRO | NETIF_F_TSO6;
2617
2618         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2619                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2620
2621         if (lancer_chip(adapter))
2622                 netdev->vlan_features |= NETIF_F_TSO6;
2623
2624         netdev->flags |= IFF_MULTICAST;
2625
2626         adapter->rx_csum = true;
2627
2628         /* Default settings for Rx and Tx flow control */
2629         adapter->rx_fc = true;
2630         adapter->tx_fc = true;
2631
2632         netif_set_gso_max_size(netdev, 65535);
2633
2634         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2635
2636         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2637
2638         for_all_rx_queues(adapter, rxo, i)
2639                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2640                                 BE_NAPI_WEIGHT);
2641
2642         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2643                 BE_NAPI_WEIGHT);
2644 }
2645
2646 static void be_unmap_pci_bars(struct be_adapter *adapter)
2647 {
2648         if (adapter->csr)
2649                 iounmap(adapter->csr);
2650         if (adapter->db)
2651                 iounmap(adapter->db);
2652         if (adapter->pcicfg && be_physfn(adapter))
2653                 iounmap(adapter->pcicfg);
2654 }
2655
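/* ioremap the BARs needed by this function; the BAR layout differs between
 * Lancer, BE2/BE3 and PF/VF personalities.
 */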
2656 static int be_map_pci_bars(struct be_adapter *adapter)
2657 {
2658         u8 __iomem *addr;
2659         int pcicfg_reg, db_reg;
2660
2661         if (lancer_chip(adapter)) {
2662                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2663                         pci_resource_len(adapter->pdev, 0));
2664                 if (addr == NULL)
2665                         return -ENOMEM;
2666                 adapter->db = addr;
2667                 return 0;
2668         }
2669
2670         if (be_physfn(adapter)) {
2671                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2672                                 pci_resource_len(adapter->pdev, 2));
2673                 if (addr == NULL)
2674                         return -ENOMEM;
2675                 adapter->csr = addr;
2676         }
2677
2678         if (adapter->generation == BE_GEN2) {
2679                 pcicfg_reg = 1;
2680                 db_reg = 4;
2681         } else {
2682                 pcicfg_reg = 0;
2683                 if (be_physfn(adapter))
2684                         db_reg = 4;
2685                 else
2686                         db_reg = 0;
2687         }
2688         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2689                                 pci_resource_len(adapter->pdev, db_reg));
2690         if (addr == NULL)
2691                 goto pci_map_err;
2692         adapter->db = addr;
2693
2694         if (be_physfn(adapter)) {
2695                 addr = ioremap_nocache(
2696                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2697                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2698                 if (addr == NULL)
2699                         goto pci_map_err;
2700                 adapter->pcicfg = addr;
2701         } else
2702                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2703
2704         return 0;
2705 pci_map_err:
2706         be_unmap_pci_bars(adapter);
2707         return -ENOMEM;
2708 }
2709
2711 static void be_ctrl_cleanup(struct be_adapter *adapter)
2712 {
2713         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2714
2715         be_unmap_pci_bars(adapter);
2716
2717         if (mem->va)
2718                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2719                                   mem->dma);
2720
2721         mem = &adapter->mc_cmd_mem;
2722         if (mem->va)
2723                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2724                                   mem->dma);
2725 }
2726
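/* Map the PCI BARs and allocate the DMA memory used for the mailbox and
 * multicast-config commands; initialize the command locks.
 */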
2727 static int be_ctrl_init(struct be_adapter *adapter)
2728 {
2729         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2730         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2731         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2732         int status;
2733
2734         status = be_map_pci_bars(adapter);
2735         if (status)
2736                 goto done;
2737
2738         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2739         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2740                                                 mbox_mem_alloc->size,
2741                                                 &mbox_mem_alloc->dma,
2742                                                 GFP_KERNEL);
2743         if (!mbox_mem_alloc->va) {
2744                 status = -ENOMEM;
2745                 goto unmap_pci_bars;
2746         }
2747
2748         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2749         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2750         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2751         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2752
2753         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2754         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2755                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2756                                             GFP_KERNEL);
2757         if (mc_cmd_mem->va == NULL) {
2758                 status = -ENOMEM;
2759                 goto free_mbox;
2760         }
2761         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2762
2763         mutex_init(&adapter->mbox_lock);
2764         spin_lock_init(&adapter->mcc_lock);
2765         spin_lock_init(&adapter->mcc_cq_lock);
2766
2767         init_completion(&adapter->flash_compl);
2768         pci_save_state(adapter->pdev);
2769         return 0;
2770
2771 free_mbox:
2772         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2773                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2774
2775 unmap_pci_bars:
2776         be_unmap_pci_bars(adapter);
2777
2778 done:
2779         return status;
2780 }
2781
2782 static void be_stats_cleanup(struct be_adapter *adapter)
2783 {
2784         struct be_dma_mem *cmd = &adapter->stats_cmd;
2785
2786         if (cmd->va)
2787                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2788                                   cmd->va, cmd->dma);
2789 }
2790
2791 static int be_stats_init(struct be_adapter *adapter)
2792 {
2793         struct be_dma_mem *cmd = &adapter->stats_cmd;
2794
2795         cmd->size = sizeof(struct be_cmd_req_get_stats);
2796         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2797                                      GFP_KERNEL);
2798         if (cmd->va == NULL)
2799                 return -1;
2800         memset(cmd->va, 0, cmd->size);
2801         return 0;
2802 }
2803
2804 static void __devexit be_remove(struct pci_dev *pdev)
2805 {
2806         struct be_adapter *adapter = pci_get_drvdata(pdev);
2807
2808         if (!adapter)
2809                 return;
2810
2811         cancel_delayed_work_sync(&adapter->work);
2812
2813         unregister_netdev(adapter->netdev);
2814
2815         be_clear(adapter);
2816
2817         be_stats_cleanup(adapter);
2818
2819         be_ctrl_cleanup(adapter);
2820
2821         be_sriov_disable(adapter);
2822
2823         be_msix_disable(adapter);
2824
2825         pci_set_drvdata(pdev, NULL);
2826         pci_release_regions(pdev);
2827         pci_disable_device(pdev);
2828
2829         free_netdev(adapter->netdev);
2830 }
2831
2832 static int be_get_config(struct be_adapter *adapter)
2833 {
2834         int status;
2835         u8 mac[ETH_ALEN];
2836
2837         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2838         if (status)
2839                 return status;
2840
2841         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2842                         &adapter->function_mode, &adapter->function_caps);
2843         if (status)
2844                 return status;
2845
2846         memset(mac, 0, ETH_ALEN);
2847
2848         if (be_physfn(adapter)) {
2849                 status = be_cmd_mac_addr_query(adapter, mac,
2850                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2851
2852                 if (status)
2853                         return status;
2854
2855                 if (!is_valid_ether_addr(mac))
2856                         return -EADDRNOTAVAIL;
2857
2858                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2859                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2860         }
2861
2862         if (adapter->function_mode & 0x400)
2863                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2864         else
2865                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2866
2867         status = be_cmd_get_cntl_attributes(adapter);
2868         if (status)
2869                 return status;
2870
2871         return 0;
2872 }
2873
2874 static int be_dev_family_check(struct be_adapter *adapter)
2875 {
2876         struct pci_dev *pdev = adapter->pdev;
2877         u32 sli_intf = 0, if_type;
2878
2879         switch (pdev->device) {
2880         case BE_DEVICE_ID1:
2881         case OC_DEVICE_ID1:
2882                 adapter->generation = BE_GEN2;
2883                 break;
2884         case BE_DEVICE_ID2:
2885         case OC_DEVICE_ID2:
2886                 adapter->generation = BE_GEN3;
2887                 break;
2888         case OC_DEVICE_ID3:
2889                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2890                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2891                                                 SLI_INTF_IF_TYPE_SHIFT;
2892
2893                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2894                         if_type != 0x02) {
2895                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2896                         return -EINVAL;
2897                 }
2898                 if (num_vfs > 0) {
2899                         dev_err(&pdev->dev, "VFs not supported\n");
2900                         return -EINVAL;
2901                 }
2902                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2903                                          SLI_INTF_FAMILY_SHIFT);
2904                 adapter->generation = BE_GEN3;
2905                 break;
2906         default:
2907                 adapter->generation = 0;
2908         }
2909         return 0;
2910 }
2911
2912 static int lancer_wait_ready(struct be_adapter *adapter)
2913 {
2914 #define SLIPORT_READY_TIMEOUT 500
2915         u32 sliport_status;
2916         int status = 0, i;
2917
2918         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2919                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2920                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2921                         break;
2922
2923                 msleep(20);
2924         }
2925
2926         if (i == SLIPORT_READY_TIMEOUT)
2927                 status = -1;
2928
2929         return status;
2930 }
2931
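/* If the SLIPORT reports an error that needs a reset, trigger a port reset
 * and wait for the port to come back ready.
 */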
2932 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2933 {
2934         int status;
2935         u32 sliport_status, err, reset_needed;
2936         status = lancer_wait_ready(adapter);
2937         if (!status) {
2938                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2939                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2940                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2941                 if (err && reset_needed) {
2942                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
2943                                         adapter->db + SLIPORT_CONTROL_OFFSET);
2944
2945                         /* check if the adapter has corrected the error */
2946                         status = lancer_wait_ready(adapter);
2947                         sliport_status = ioread32(adapter->db +
2948                                                         SLIPORT_STATUS_OFFSET);
2949                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2950                                                 SLIPORT_STATUS_RN_MASK);
2951                         if (status || sliport_status)
2952                                 status = -1;
2953                 } else if (err || reset_needed) {
2954                         status = -1;
2955                 }
2956         }
2957         return status;
2958 }
2959
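/*
 * PCI probe: enable and map the device, set the DMA mask, sync up with
 * the firmware and register the net_device. Errors unwind in reverse
 * order of initialization via the labels at the end of the function.
 */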
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev,
                                "Adapter in non-recoverable error\n");
                        /* be_ctrl_init() has succeeded; unwind it too */
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

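/*
 * PM suspend: stop the delayed worker, arm wake-on-lan if configured,
 * close the interface and tear down queues and MSI-X before putting
 * the device into the requested low-power state.
 */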
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

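/*
 * PM resume: power the device back to D0, re-init the firmware command
 * path and the resources torn down in be_suspend, then reattach the
 * netdev and restart the delayed worker.
 */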
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * Reset the function on shutdown; an FLR will stop BE from DMAing
 * any further data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        /* the worker is scheduled at probe time regardless of whether
         * the interface is up, so cancel it unconditionally
         */
        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

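/*
 * PCI error-recovery callback: called when an EEH/AER error is
 * detected on the device. Quiesces the interface and tells the PCI
 * core whether a slot reset may recover it.
 */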
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

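/*
 * Slot-reset callback: re-enable and restore the device after the link
 * reset and use POST to check that the firmware is ready again.
 */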
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

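/*
 * Called after a successful slot reset: re-create the resources freed
 * in the error-detected callback and reopen the interface if it was
 * running when the error occurred.
 */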
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

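/*
 * Error-recovery callbacks invoked by the PCI core (e.g. EEH on
 * pseries) when a fatal I/O error is detected on the device.
 */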
static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

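/*
 * Module entry point: sanity-check the rx_frag_size and num_vfs module
 * parameters, clamping them to supported values, then register the PCI
 * driver.
 */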
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);