1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
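
/*
 * Example usage, as a sketch (parameter values illustrative):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size sizes the rx buffer fragments posted to the card and
 * is expected to be 2048, 4096 or 8192; num_vfs > 0 asks the PF to
 * enable that many SR-IOV virtual functions at probe time.
 */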
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50         "CEV",
51         "CTX",
52         "DBUF",
53         "ERX",
54         "Host",
55         "MPU",
56         "NDMA",
57         "PTC ",
58         "RDMA ",
59         "RXF ",
60         "RXIPS ",
61         "RXULP0 ",
62         "RXULP1 ",
63         "RXULP2 ",
64         "TIM ",
65         "TPOST ",
66         "TPRE ",
67         "TXIPS ",
68         "TXULP0 ",
69         "TXULP1 ",
70         "UC ",
71         "WDMA ",
72         "TXULP2 ",
73         "HOST1 ",
74         "P0_OB_LINK ",
75         "P1_OB_LINK ",
76         "HOST_GPIO ",
77         "MBOX ",
78         "AXGMAC0",
79         "AXGMAC1",
80         "JTAG",
81         "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85         "LPCMEMHOST",
86         "MGMT_MAC",
87         "PCS0ONLINE",
88         "MPU_IRAM",
89         "PCS1ONLINE",
90         "PCTL0",
91         "PCTL1",
92         "PMEM",
93         "RR",
94         "TXPB",
95         "RXPP",
96         "XAUI",
97         "TXP",
98         "ARM",
99         "IPC",
100         "HOST2",
101         "HOST3",
102         "HOST4",
103         "HOST5",
104         "HOST6",
105         "HOST7",
106         "HOST8",
107         "HOST9",
108         "NETC"
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121         return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126         struct be_dma_mem *mem = &q->dma_mem;
127         if (mem->va)
128                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129                                   mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133                 u16 len, u16 entry_size)
134 {
135         struct be_dma_mem *mem = &q->dma_mem;
136
137         memset(q, 0, sizeof(*q));
138         q->len = len;
139         q->entry_size = entry_size;
140         mem->size = len * entry_size;
141         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142                                      GFP_KERNEL);
143         if (!mem->va)
144                 return -1;
145         memset(mem->va, 0, mem->size);
146         return 0;
147 }
148
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152         u32 reg = ioread32(addr);
153         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
154
155         if (adapter->eeh_err)
156                 return;
157
158         if (!enabled && enable)
159                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else if (enabled && !enable)
161                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else
163                 return;
164
165         iowrite32(reg, addr);
166 }
167
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
169 {
170         u32 val = 0;
171         val |= qid & DB_RQ_RING_ID_MASK;
172         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
173
174         wmb();
175         iowrite32(val, adapter->db + DB_RQ_OFFSET);
176 }
177
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_TXULP_RING_ID_MASK;
182         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
186 }
187
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189                 bool arm, bool clear_int, u16 num_popped)
190 {
191         u32 val = 0;
192         val |= qid & DB_EQ_RING_ID_MASK;
193         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
195
196         if (adapter->eeh_err)
197                 return;
198
199         if (arm)
200                 val |= 1 << DB_EQ_REARM_SHIFT;
201         if (clear_int)
202                 val |= 1 << DB_EQ_CLR_SHIFT;
203         val |= 1 << DB_EQ_EVNT_SHIFT;
204         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205         iowrite32(val, adapter->db + DB_EQ_OFFSET);
206 }
207
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
209 {
210         u32 val = 0;
211         val |= qid & DB_CQ_RING_ID_MASK;
212         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
214
215         if (adapter->eeh_err)
216                 return;
217
218         if (arm)
219                 val |= 1 << DB_CQ_REARM_SHIFT;
220         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221         iowrite32(val, adapter->db + DB_CQ_OFFSET);
222 }
223
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
225 {
226         struct be_adapter *adapter = netdev_priv(netdev);
227         struct sockaddr *addr = p;
228         int status = 0;
229
230         if (!is_valid_ether_addr(addr->sa_data))
231                 return -EADDRNOTAVAIL;
232
233         /* MAC addr configuration will be done in hardware for VFs
234          * by their corresponding PFs. Just copy to netdev addr here
235          */
236         if (!be_physfn(adapter))
237                 goto netdev_addr;
238
239         status = be_cmd_pmac_del(adapter, adapter->if_handle,
240                                 adapter->pmac_id, 0);
241         if (status)
242                 return status;
243
244         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245                                 adapter->if_handle, &adapter->pmac_id, 0);
246 netdev_addr:
247         if (!status)
248                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
249
250         return status;
251 }
252
253 void netdev_stats_update(struct be_adapter *adapter)
254 {
255         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
256         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257         struct be_port_rxf_stats *port_stats =
258                         &rxf_stats->port[adapter->port_num];
259         struct net_device_stats *dev_stats = &adapter->netdev->stats;
260         struct be_erx_stats *erx_stats = &hw_stats->erx;
261         struct be_rx_obj *rxo;
262         int i;
263
264         memset(dev_stats, 0, sizeof(*dev_stats));
265         for_all_rx_queues(adapter, rxo, i) {
266                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
267                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269                 /* no space in Linux buffers: best possible approximation */
270                 dev_stats->rx_dropped +=
271                         erx_stats->rx_drops_no_fragments[rxo->q.id];
272         }
273
274         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
276
277         /* bad pkts received */
278         dev_stats->rx_errors = port_stats->rx_crc_errors +
279                 port_stats->rx_alignment_symbol_errors +
280                 port_stats->rx_in_range_errors +
281                 port_stats->rx_out_range_errors +
282                 port_stats->rx_frame_too_long +
283                 port_stats->rx_dropped_too_small +
284                 port_stats->rx_dropped_too_short +
285                 port_stats->rx_dropped_header_too_small +
286                 port_stats->rx_dropped_tcp_length +
287                 port_stats->rx_dropped_runt +
288                 port_stats->rx_tcp_checksum_errs +
289                 port_stats->rx_ip_checksum_errs +
290                 port_stats->rx_udp_checksum_errs;
291
292         /* detailed rx errors */
293         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
294                 port_stats->rx_out_range_errors +
295                 port_stats->rx_frame_too_long;
296
297         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
298
299         /* frame alignment errors */
300         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
301
302         /* receiver fifo overrun */
303         /* drops_no_pbuf is not per i/f, it's per BE card */
304         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
305                                         port_stats->rx_input_fifo_overflow +
306                                         rxf_stats->rx_drops_no_pbuf;
307 }
308
309 void be_link_status_update(struct be_adapter *adapter, bool link_up)
310 {
311         struct net_device *netdev = adapter->netdev;
312
313         /* If link came up or went down */
314         if (adapter->link_up != link_up) {
315                 adapter->link_speed = -1;
316                 if (link_up) {
317                         netif_carrier_on(netdev);
318                         printk(KERN_INFO "%s: Link up\n", netdev->name);
319                 } else {
320                         netif_carrier_off(netdev);
321                         printk(KERN_INFO "%s: Link down\n", netdev->name);
322                 }
323                 adapter->link_up = link_up;
324         }
325 }
326
327 /* Update the EQ delay in BE based on the RX frags consumed / sec */
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
329 {
330         struct be_eq_obj *rx_eq = &rxo->rx_eq;
331         struct be_rx_stats *stats = &rxo->stats;
332         ulong now = jiffies;
333         u32 eqd;
334
335         if (!rx_eq->enable_aic)
336                 return;
337
338         /* Wrapped around */
339         if (time_before(now, stats->rx_fps_jiffies)) {
340                 stats->rx_fps_jiffies = now;
341                 return;
342         }
343
344         /* Update once a second */
345         if ((now - stats->rx_fps_jiffies) < HZ)
346                 return;
347
348         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349                         ((now - stats->rx_fps_jiffies) / HZ);
350
351         stats->rx_fps_jiffies = now;
352         stats->prev_rx_frags = stats->rx_frags;
353         eqd = stats->rx_fps / 110000;
354         eqd = eqd << 3;
355         if (eqd > rx_eq->max_eqd)
356                 eqd = rx_eq->max_eqd;
357         if (eqd < rx_eq->min_eqd)
358                 eqd = rx_eq->min_eqd;
359         if (eqd < 10)
360                 eqd = 0;
361         if (eqd != rx_eq->cur_eqd)
362                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
363
364         rx_eq->cur_eqd = eqd;
365 }
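
/*
 * A worked example of the adaptive heuristic above (values
 * illustrative): at ~880K rx frags/sec, eqd = (880000 / 110000) << 3
 * = 64; at ~110K frags/sec, eqd = 1 << 3 = 8. The result is clamped
 * to [min_eqd, max_eqd], anything still under 10 is forced to 0 (no
 * interrupt delay under light load), and the EQ is reprogrammed only
 * when the value actually changes.
 */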
366
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
368 {
369         u64 rate = bytes;
370
371         do_div(rate, ticks / HZ);
372         rate <<= 3;                     /* bytes/sec -> bits/sec */
373         do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */
374
375         return rate;
376 }
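
/*
 * For example, 250,000,000 bytes moved over 2 * HZ ticks works out
 * to 125,000,000 bytes/sec = 1,000,000,000 bits/sec, which this
 * helper reports as 1000 (Mbits/sec).
 */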
377
378 static void be_tx_rate_update(struct be_adapter *adapter)
379 {
380         struct be_tx_stats *stats = tx_stats(adapter);
381         ulong now = jiffies;
382
383         /* Wrapped around? */
384         if (time_before(now, stats->be_tx_jiffies)) {
385                 stats->be_tx_jiffies = now;
386                 return;
387         }
388
389         /* Update tx rate once in two seconds */
390         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392                                                   - stats->be_tx_bytes_prev,
393                                                  now - stats->be_tx_jiffies);
394                 stats->be_tx_jiffies = now;
395                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
396         }
397 }
398
399 static void be_tx_stats_update(struct be_adapter *adapter,
400                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
401 {
402         struct be_tx_stats *stats = tx_stats(adapter);
403         stats->be_tx_reqs++;
404         stats->be_tx_wrbs += wrb_cnt;
405         stats->be_tx_bytes += copied;
406         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407         if (stopped)
408                 stats->be_tx_stops++;
409 }
410
411 /* Determine number of WRB entries needed to xmit data in an skb */
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413                                                                 bool *dummy)
414 {
415         int cnt = (skb->len > skb->data_len);
416
417         cnt += skb_shinfo(skb)->nr_frags;
418
419         /* to account for hdr wrb */
420         cnt++;
421         if (lancer_chip(adapter) || !(cnt & 1)) {
422                 *dummy = false;
423         } else {
424                 /* add a dummy to make it an even num */
425                 cnt++;
426                 *dummy = true;
427         }
428         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429         return cnt;
430 }
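
/*
 * For example, a purely linear skb costs 1 data WRB + 1 hdr WRB = 2
 * (even, no dummy needed), while a linear skb with one page frag
 * costs 3, so a dummy WRB is appended to keep the count even; the
 * even-count requirement does not apply to Lancer.
 */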
431
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
433 {
434         wrb->frag_pa_hi = upper_32_bits(addr);
435         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
437 }
438
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
441 {
442         u8 vlan_prio = 0;
443         u16 vlan_tag = 0;
444
445         memset(hdr, 0, sizeof(*hdr));
446
447         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
448
449         if (skb_is_gso(skb)) {
450                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452                         hdr, skb_shinfo(skb)->gso_size);
453                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455                 if (lancer_chip(adapter) && adapter->sli_family  ==
456                                                         LANCER_A0_SLI_FAMILY) {
457                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458                         if (is_tcp_pkt(skb))
459                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460                                                                 tcpcs, hdr, 1);
461                         else if (is_udp_pkt(skb))
462                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463                                                                 udpcs, hdr, 1);
464                 }
465         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466                 if (is_tcp_pkt(skb))
467                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468                 else if (is_udp_pkt(skb))
469                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
470         }
471
472         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474                 vlan_tag = vlan_tx_tag_get(skb);
475                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476                 /* If vlan priority provided by OS is NOT in available bmap */
477                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479                                         adapter->recommended_prio;
480                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
481         }
482
483         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487 }
488
489 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490                 bool unmap_single)
491 {
492         dma_addr_t dma;
493
494         be_dws_le_to_cpu(wrb, sizeof(*wrb));
495
496         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497         if (wrb->frag_len) {
498                 if (unmap_single)
499                         dma_unmap_single(dev, dma, wrb->frag_len,
500                                          DMA_TO_DEVICE);
501                 else
502                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
503         }
504 }
505
506 static int make_tx_wrbs(struct be_adapter *adapter,
507                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
508 {
509         dma_addr_t busaddr;
510         int i, copied = 0;
511         struct device *dev = &adapter->pdev->dev;
512         struct sk_buff *first_skb = skb;
513         struct be_queue_info *txq = &adapter->tx_obj.q;
514         struct be_eth_wrb *wrb;
515         struct be_eth_hdr_wrb *hdr;
516         bool map_single = false;
517         u16 map_head;
518
519         hdr = queue_head_node(txq);
520         queue_head_inc(txq);
521         map_head = txq->head;
522
523         if (skb->len > skb->data_len) {
524                 int len = skb_headlen(skb);
525                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
526                 if (dma_mapping_error(dev, busaddr))
527                         goto dma_err;
528                 map_single = true;
529                 wrb = queue_head_node(txq);
530                 wrb_fill(wrb, busaddr, len);
531                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532                 queue_head_inc(txq);
533                 copied += len;
534         }
535
536         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537                 struct skb_frag_struct *frag =
538                         &skb_shinfo(skb)->frags[i];
539                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540                                        frag->size, DMA_TO_DEVICE);
541                 if (dma_mapping_error(dev, busaddr))
542                         goto dma_err;
543                 wrb = queue_head_node(txq);
544                 wrb_fill(wrb, busaddr, frag->size);
545                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
546                 queue_head_inc(txq);
547                 copied += frag->size;
548         }
549
550         if (dummy_wrb) {
551                 wrb = queue_head_node(txq);
552                 wrb_fill(wrb, 0, 0);
553                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
554                 queue_head_inc(txq);
555         }
556
557         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
558         be_dws_cpu_to_le(hdr, sizeof(*hdr));
559
560         return copied;
561 dma_err:
562         txq->head = map_head;
563         while (copied) {
564                 wrb = queue_head_node(txq);
565                 unmap_tx_frag(dev, wrb, map_single);
566                 map_single = false;
567                 copied -= wrb->frag_len;
568                 queue_head_inc(txq);
569         }
570         return 0;
571 }
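
/*
 * Sketch of the ring layout built above for an skb with linear data
 * plus one page frag (an odd count, so a dummy is appended):
 *
 *   [hdr wrb][linear wrb][frag0 wrb][dummy wrb]
 *
 * The hdr WRB is reserved first but filled last, since it encodes the
 * final wrb_cnt and total copied length; on a DMA mapping error the
 * queue head is rewound and the already-mapped frags are unmapped.
 */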
572
573 static netdev_tx_t be_xmit(struct sk_buff *skb,
574                         struct net_device *netdev)
575 {
576         struct be_adapter *adapter = netdev_priv(netdev);
577         struct be_tx_obj *tx_obj = &adapter->tx_obj;
578         struct be_queue_info *txq = &tx_obj->q;
579         u32 wrb_cnt = 0, copied = 0;
580         u32 start = txq->head;
581         bool dummy_wrb, stopped = false;
582
583         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
584
585         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
586         if (copied) {
587                 /* record the sent skb in the sent_skb table */
588                 BUG_ON(tx_obj->sent_skb_list[start]);
589                 tx_obj->sent_skb_list[start] = skb;
590
591                 /* Ensure txq has space for the next skb; else stop the queue
592                  * *BEFORE* ringing the tx doorbell, so that we serialize the
593                  * tx compls of the current transmit which'll wake up the queue
594                  */
595                 atomic_add(wrb_cnt, &txq->used);
596                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
597                                                                 txq->len) {
598                         netif_stop_queue(netdev);
599                         stopped = true;
600                 }
601
602                 be_txq_notify(adapter, txq->id, wrb_cnt);
603
604                 be_tx_stats_update(adapter, wrb_cnt, copied,
605                                 skb_shinfo(skb)->gso_segs, stopped);
606         } else {
607                 txq->head = start;
608                 dev_kfree_skb_any(skb);
609         }
610         return NETDEV_TX_OK;
611 }
612
613 static int be_change_mtu(struct net_device *netdev, int new_mtu)
614 {
615         struct be_adapter *adapter = netdev_priv(netdev);
616         if (new_mtu < BE_MIN_MTU ||
617                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
618                                         (ETH_HLEN + ETH_FCS_LEN))) {
619                 dev_info(&adapter->pdev->dev,
620                         "MTU must be between %d and %d bytes\n",
621                         BE_MIN_MTU,
622                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
623                 return -EINVAL;
624         }
625         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
626                         netdev->mtu, new_mtu);
627         netdev->mtu = new_mtu;
628         return 0;
629 }
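
/*
 * For example, assuming BE_MAX_JUMBO_FRAME_SIZE is 9018, the largest
 * MTU accepted here is 9018 - (14 + 4) = 9000 bytes: the on-wire
 * frame (MTU + Ethernet header + FCS) must still fit in one jumbo
 * frame buffer.
 */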
630
631 /*
632  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
633  * If the user configures more, place BE in vlan promiscuous mode.
634  */
635 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
636 {
637         u16 vtag[BE_NUM_VLANS_SUPPORTED];
638         u16 ntags = 0, i;
639         int status = 0;
640         u32 if_handle;
641
642         if (vf) {
643                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
644                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
645                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
646         }
647
648         if (adapter->vlans_added <= adapter->max_vlans)  {
649                 /* Construct VLAN Table to give to HW */
650                 for (i = 0; i < VLAN_N_VID; i++) {
651                         if (adapter->vlan_tag[i]) {
652                                 vtag[ntags] = cpu_to_le16(i);
653                                 ntags++;
654                         }
655                 }
656                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
657                                         vtag, ntags, 1, 0);
658         } else {
659                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660                                         NULL, 0, 1, 1);
661         }
662
663         return status;
664 }
665
666 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
667 {
668         struct be_adapter *adapter = netdev_priv(netdev);
669
670         adapter->vlan_grp = grp;
671 }
672
673 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
674 {
675         struct be_adapter *adapter = netdev_priv(netdev);
676
677         adapter->vlans_added++;
678         if (!be_physfn(adapter))
679                 return;
680
681         adapter->vlan_tag[vid] = 1;
682         if (adapter->vlans_added <= (adapter->max_vlans + 1))
683                 be_vid_config(adapter, false, 0);
684 }
685
686 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
687 {
688         struct be_adapter *adapter = netdev_priv(netdev);
689
690         adapter->vlans_added--;
691         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
692
693         if (!be_physfn(adapter))
694                 return;
695
696         adapter->vlan_tag[vid] = 0;
697         if (adapter->vlans_added <= adapter->max_vlans)
698                 be_vid_config(adapter, false, 0);
699 }
700
701 static void be_set_multicast_list(struct net_device *netdev)
702 {
703         struct be_adapter *adapter = netdev_priv(netdev);
704
705         if (netdev->flags & IFF_PROMISC) {
706                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
707                 adapter->promiscuous = true;
708                 goto done;
709         }
710
711         /* BE was previously in promiscuous mode; disable it */
712         if (adapter->promiscuous) {
713                 adapter->promiscuous = false;
714                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
715         }
716
717         /* Enable multicast promisc if num configured exceeds what we support */
718         if (netdev->flags & IFF_ALLMULTI ||
719             netdev_mc_count(netdev) > BE_MAX_MC) {
720                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
721                                 &adapter->mc_cmd_mem);
722                 goto done;
723         }
724
725         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
726                 &adapter->mc_cmd_mem);
727 done:
728         return;
729 }
730
731 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
732 {
733         struct be_adapter *adapter = netdev_priv(netdev);
734         int status;
735
736         if (!adapter->sriov_enabled)
737                 return -EPERM;
738
739         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740                 return -EINVAL;
741
742         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743                 status = be_cmd_pmac_del(adapter,
744                                         adapter->vf_cfg[vf].vf_if_handle,
745                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
746
747         status = be_cmd_pmac_add(adapter, mac,
748                                 adapter->vf_cfg[vf].vf_if_handle,
749                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
750
751         if (status)
752                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753                                 mac, vf);
754         else
755                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
756
757         return status;
758 }
759
760 static int be_get_vf_config(struct net_device *netdev, int vf,
761                         struct ifla_vf_info *vi)
762 {
763         struct be_adapter *adapter = netdev_priv(netdev);
764
765         if (!adapter->sriov_enabled)
766                 return -EPERM;
767
768         if (vf >= num_vfs)
769                 return -EINVAL;
770
771         vi->vf = vf;
772         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
773         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
774         vi->qos = 0;
775         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
776
777         return 0;
778 }
779
780 static int be_set_vf_vlan(struct net_device *netdev,
781                         int vf, u16 vlan, u8 qos)
782 {
783         struct be_adapter *adapter = netdev_priv(netdev);
784         int status = 0;
785
786         if (!adapter->sriov_enabled)
787                 return -EPERM;
788
789         if ((vf >= num_vfs) || (vlan > 4095))
790                 return -EINVAL;
791
792         if (vlan) {
793                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794                 adapter->vlans_added++;
795         } else {
796                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797                 adapter->vlans_added--;
798         }
799
800         status = be_vid_config(adapter, true, vf);
801
802         if (status)
803                 dev_info(&adapter->pdev->dev,
804                                 "VLAN %d config on VF %d failed\n", vlan, vf);
805         return status;
806 }
807
808 static int be_set_vf_tx_rate(struct net_device *netdev,
809                         int vf, int rate)
810 {
811         struct be_adapter *adapter = netdev_priv(netdev);
812         int status = 0;
813
814         if (!adapter->sriov_enabled)
815                 return -EPERM;
816
817         if ((vf >= num_vfs) || (rate < 0))
818                 return -EINVAL;
819
820         if (rate > 10000)
821                 rate = 10000;
822
823         adapter->vf_cfg[vf].vf_tx_rate = rate;
824         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
825
826         if (status)
827                 dev_info(&adapter->pdev->dev,
828                                 "tx rate %d on VF %d failed\n", rate, vf);
829         return status;
830 }
831
832 static void be_rx_rate_update(struct be_rx_obj *rxo)
833 {
834         struct be_rx_stats *stats = &rxo->stats;
835         ulong now = jiffies;
836
837         /* Wrapped around */
838         if (time_before(now, stats->rx_jiffies)) {
839                 stats->rx_jiffies = now;
840                 return;
841         }
842
843         /* Update the rate once in two seconds */
844         if ((now - stats->rx_jiffies) < 2 * HZ)
845                 return;
846
847         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848                                 now - stats->rx_jiffies);
849         stats->rx_jiffies = now;
850         stats->rx_bytes_prev = stats->rx_bytes;
851 }
852
853 static void be_rx_stats_update(struct be_rx_obj *rxo,
854                 struct be_rx_compl_info *rxcp)
855 {
856         struct be_rx_stats *stats = &rxo->stats;
857
858         stats->rx_compl++;
859         stats->rx_frags += rxcp->num_rcvd;
860         stats->rx_bytes += rxcp->pkt_size;
861         stats->rx_pkts++;
862         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
863                 stats->rx_mcast_pkts++;
864         if (rxcp->err)
865                 stats->rxcp_err++;
866 }
867
868 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
869 {
870         /* L4 checksum is not reliable for non TCP/UDP packets.
871          * Also ignore ipcksm for ipv6 pkts */
872         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
873                                 (rxcp->ip_csum || rxcp->ipv6);
874 }
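
/*
 * E.g. a TCP/IPv4 frame is trusted only when tcpf, l4_csum and
 * ip_csum are all set; for TCP over IPv6 the ipv6 flag stands in
 * for ip_csum, as there is no IPv6 header checksum to verify.
 */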
875
876 static struct be_rx_page_info *
877 get_rx_page_info(struct be_adapter *adapter,
878                 struct be_rx_obj *rxo,
879                 u16 frag_idx)
880 {
881         struct be_rx_page_info *rx_page_info;
882         struct be_queue_info *rxq = &rxo->q;
883
884         rx_page_info = &rxo->page_info_tbl[frag_idx];
885         BUG_ON(!rx_page_info->page);
886
887         if (rx_page_info->last_page_user) {
888                 dma_unmap_page(&adapter->pdev->dev,
889                                dma_unmap_addr(rx_page_info, bus),
890                                adapter->big_page_size, DMA_FROM_DEVICE);
891                 rx_page_info->last_page_user = false;
892         }
893
894         atomic_dec(&rxq->used);
895         return rx_page_info;
896 }
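
/*
 * Each "big page" is DMA-mapped once when its fragments are posted
 * (see be_post_rx_frags); last_page_user marks the fragment carved
 * out of the page last, so the unmap above runs only after every
 * fragment sharing that mapping has been consumed.
 */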
897
898 /* Throw away the data in the Rx completion */
899 static void be_rx_compl_discard(struct be_adapter *adapter,
900                 struct be_rx_obj *rxo,
901                 struct be_rx_compl_info *rxcp)
902 {
903         struct be_queue_info *rxq = &rxo->q;
904         struct be_rx_page_info *page_info;
905         u16 i, num_rcvd = rxcp->num_rcvd;
906
907         for (i = 0; i < num_rcvd; i++) {
908                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
909                 put_page(page_info->page);
910                 memset(page_info, 0, sizeof(*page_info));
911                 index_inc(&rxcp->rxq_idx, rxq->len);
912         }
913 }
914
915 /*
916  * skb_fill_rx_data forms a complete skb for an ether frame
917  * indicated by rxcp.
918  */
919 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
920                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
921 {
922         struct be_queue_info *rxq = &rxo->q;
923         struct be_rx_page_info *page_info;
924         u16 i, j;
925         u16 hdr_len, curr_frag_len, remaining;
926         u8 *start;
927
928         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
929         start = page_address(page_info->page) + page_info->page_offset;
930         prefetch(start);
931
932         /* Copy data in the first descriptor of this completion */
933         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
934
935         /* Copy the header portion into skb_data */
936         hdr_len = min(BE_HDR_LEN, curr_frag_len);
937         memcpy(skb->data, start, hdr_len);
938         skb->len = curr_frag_len;
939         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
940                 /* Complete packet has now been moved to data */
941                 put_page(page_info->page);
942                 skb->data_len = 0;
943                 skb->tail += curr_frag_len;
944         } else {
945                 skb_shinfo(skb)->nr_frags = 1;
946                 skb_shinfo(skb)->frags[0].page = page_info->page;
947                 skb_shinfo(skb)->frags[0].page_offset =
948                                         page_info->page_offset + hdr_len;
949                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
950                 skb->data_len = curr_frag_len - hdr_len;
951                 skb->tail += hdr_len;
952         }
953         page_info->page = NULL;
954
955         if (rxcp->pkt_size <= rx_frag_size) {
956                 BUG_ON(rxcp->num_rcvd != 1);
957                 return;
958         }
959
960         /* More frags present for this completion */
961         index_inc(&rxcp->rxq_idx, rxq->len);
962         remaining = rxcp->pkt_size - curr_frag_len;
963         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
964                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
965                 curr_frag_len = min(remaining, rx_frag_size);
966
967                 /* Coalesce all frags from the same physical page in one slot */
968                 if (page_info->page_offset == 0) {
969                         /* Fresh page */
970                         j++;
971                         skb_shinfo(skb)->frags[j].page = page_info->page;
972                         skb_shinfo(skb)->frags[j].page_offset =
973                                                         page_info->page_offset;
974                         skb_shinfo(skb)->frags[j].size = 0;
975                         skb_shinfo(skb)->nr_frags++;
976                 } else {
977                         put_page(page_info->page);
978                 }
979
980                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
981                 skb->len += curr_frag_len;
982                 skb->data_len += curr_frag_len;
983
984                 remaining -= curr_frag_len;
985                 index_inc(&rxcp->rxq_idx, rxq->len);
986                 page_info->page = NULL;
987         }
988         BUG_ON(j > MAX_SKB_FRAGS);
989 }
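
/*
 * Worked example, assuming rx_frag_size = 2048 and BE_HDR_LEN = 64:
 * a 3000-byte frame spans two rx fragments. The first 64 bytes are
 * copied into the skb head and frags[0] covers the remaining 1984
 * bytes of fragment one; if fragment two lies in the same physical
 * page it is folded into frags[0] (dropping its extra page ref),
 * otherwise it lands in frags[1] with the final 952 bytes.
 */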
990
991 /* Process the RX completion indicated by rxcp when GRO is disabled */
992 static void be_rx_compl_process(struct be_adapter *adapter,
993                         struct be_rx_obj *rxo,
994                         struct be_rx_compl_info *rxcp)
995 {
996         struct sk_buff *skb;
997
998         skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
999         if (unlikely(!skb)) {
1000                 if (net_ratelimit())
1001                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1002                 be_rx_compl_discard(adapter, rxo, rxcp);
1003                 return;
1004         }
1005
1006         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1007
1008         if (likely(adapter->rx_csum && csum_passed(rxcp)))
1009                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010         else
1011                 skb_checksum_none_assert(skb);
1012
1013         skb->truesize = skb->len + sizeof(struct sk_buff);
1014         skb->protocol = eth_type_trans(skb, adapter->netdev);
1015
1016         if (unlikely(rxcp->vlanf)) {
1017                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1018                         kfree_skb(skb);
1019                         return;
1020                 }
1021                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1022         } else {
1023                 netif_receive_skb(skb);
1024         }
1025 }
1026
1027 /* Process the RX completion indicated by rxcp when GRO is enabled */
1028 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1029                 struct be_rx_obj *rxo,
1030                 struct be_rx_compl_info *rxcp)
1031 {
1032         struct be_rx_page_info *page_info;
1033         struct sk_buff *skb = NULL;
1034         struct be_queue_info *rxq = &rxo->q;
1035         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1036         u16 remaining, curr_frag_len;
1037         u16 i, j;
1038
1039         skb = napi_get_frags(&eq_obj->napi);
1040         if (!skb) {
1041                 be_rx_compl_discard(adapter, rxo, rxcp);
1042                 return;
1043         }
1044
1045         remaining = rxcp->pkt_size;
1046         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1047                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1048
1049                 curr_frag_len = min(remaining, rx_frag_size);
1050
1051                 /* Coalesce all frags from the same physical page in one slot */
1052                 if (i == 0 || page_info->page_offset == 0) {
1053                         /* First frag or Fresh page */
1054                         j++;
1055                         skb_shinfo(skb)->frags[j].page = page_info->page;
1056                         skb_shinfo(skb)->frags[j].page_offset =
1057                                                         page_info->page_offset;
1058                         skb_shinfo(skb)->frags[j].size = 0;
1059                 } else {
1060                         put_page(page_info->page);
1061                 }
1062                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1063
1064                 remaining -= curr_frag_len;
1065                 index_inc(&rxcp->rxq_idx, rxq->len);
1066                 memset(page_info, 0, sizeof(*page_info));
1067         }
1068         BUG_ON(j > MAX_SKB_FRAGS);
1069
1070         skb_shinfo(skb)->nr_frags = j + 1;
1071         skb->len = rxcp->pkt_size;
1072         skb->data_len = rxcp->pkt_size;
1073         skb->truesize += rxcp->pkt_size;
1074         skb->ip_summed = CHECKSUM_UNNECESSARY;
1075
1076         if (likely(!rxcp->vlanf))
1077                 napi_gro_frags(&eq_obj->napi);
1078         else
1079                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1080 }
1081
1082 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1083                                 struct be_eth_rx_compl *compl,
1084                                 struct be_rx_compl_info *rxcp)
1085 {
1086         rxcp->pkt_size =
1087                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1088         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1089         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1090         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1091         rxcp->ip_csum =
1092                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1093         rxcp->l4_csum =
1094                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1095         rxcp->ipv6 =
1096                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1097         rxcp->rxq_idx =
1098                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1099         rxcp->num_rcvd =
1100                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1101         rxcp->pkt_type =
1102                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1103         rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
1104         rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
1105 }
1106
1107 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1108                                 struct be_eth_rx_compl *compl,
1109                                 struct be_rx_compl_info *rxcp)
1110 {
1111         rxcp->pkt_size =
1112                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1113         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1114         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1115         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1116         rxcp->ip_csum =
1117                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1118         rxcp->l4_csum =
1119                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1120         rxcp->ipv6 =
1121                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1122         rxcp->rxq_idx =
1123                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1124         rxcp->num_rcvd =
1125                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1126         rxcp->pkt_type =
1127                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1128         rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
1129         rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
1130 }
1131
1132 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1133 {
1134         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1135         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1136         struct be_adapter *adapter = rxo->adapter;
1137
1138         /* For checking the valid bit it is OK to use either definition as the
1139          * valid bit is at the same position in both v0 and v1 Rx compl */
1140         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1141                 return NULL;
1142
1143         rmb();
1144         be_dws_le_to_cpu(compl, sizeof(*compl));
1145
1146         if (adapter->be3_native)
1147                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1148         else
1149                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1150
1151         /* vlanf could be wrongly set in some cards. Ignore if vtm is not set */
1152         if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1153                 rxcp->vlanf = 0;
1154
1155         if (!lancer_chip(adapter))
1156                 rxcp->vid = swab16(rxcp->vid);
1157
1158         if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
1159                 rxcp->vlanf = 0;
1160
1161         /* As the compl has been parsed, reset it; we won't touch it again */
1162         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1163
1164         queue_tail_inc(&rxo->cq);
1165         return rxcp;
1166 }
1167
1168 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1169 {
1170         u32 order = get_order(size);
1171
1172         if (order > 0)
1173                 gfp |= __GFP_COMP;
1174         return  alloc_pages(gfp, order);
1175 }
1176
1177 /*
1178  * Allocate a page, split it into fragments of size rx_frag_size and post as
1179  * receive buffers to BE
1180  */
1181 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1182 {
1183         struct be_adapter *adapter = rxo->adapter;
1184         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1185         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1186         struct be_queue_info *rxq = &rxo->q;
1187         struct page *pagep = NULL;
1188         struct be_eth_rx_d *rxd;
1189         u64 page_dmaaddr = 0, frag_dmaaddr;
1190         u32 posted, page_offset = 0;
1191
1192         page_info = &rxo->page_info_tbl[rxq->head];
1193         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1194                 if (!pagep) {
1195                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1196                         if (unlikely(!pagep)) {
1197                                 rxo->stats.rx_post_fail++;
1198                                 break;
1199                         }
1200                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1201                                                     0, adapter->big_page_size,
1202                                                     DMA_FROM_DEVICE);
1203                         page_info->page_offset = 0;
1204                 } else {
1205                         get_page(pagep);
1206                         page_info->page_offset = page_offset + rx_frag_size;
1207                 }
1208                 page_offset = page_info->page_offset;
1209                 page_info->page = pagep;
1210                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1211                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1212
1213                 rxd = queue_head_node(rxq);
1214                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1215                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1216
1217                 /* Any space left in the current big page for another frag? */
1218                 if ((page_offset + rx_frag_size + rx_frag_size) >
1219                                         adapter->big_page_size) {
1220                         pagep = NULL;
1221                         page_info->last_page_user = true;
1222                 }
1223
1224                 prev_page_info = page_info;
1225                 queue_head_inc(rxq);
1226                 page_info = &page_info_tbl[rxq->head];
1227         }
1228         if (pagep)
1229                 prev_page_info->last_page_user = true;
1230
1231         if (posted) {
1232                 atomic_add(posted, &rxq->used);
1233                 be_rxq_notify(adapter, rxq->id, posted);
1234         } else if (atomic_read(&rxq->used) == 0) {
1235                 /* Let be_worker replenish when memory is available */
1236                 rxo->rx_post_starved = true;
1237         }
1238 }
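
/*
 * E.g. with 4K pages and the default rx_frag_size of 2048, each page
 * is posted as two rx descriptors: the page is DMA-mapped once, each
 * additional fragment takes a page reference via get_page(), and
 * last_page_user is set on the second descriptor so the mapping is
 * torn down only after both buffers are consumed.
 */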
1239
1240 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1241 {
1242         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1243
1244         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1245                 return NULL;
1246
1247         rmb();
1248         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1249
1250         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1251
1252         queue_tail_inc(tx_cq);
1253         return txcp;
1254 }
1255
1256 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1257 {
1258         struct be_queue_info *txq = &adapter->tx_obj.q;
1259         struct be_eth_wrb *wrb;
1260         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1261         struct sk_buff *sent_skb;
1262         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1263         bool unmap_skb_hdr = true;
1264
1265         sent_skb = sent_skbs[txq->tail];
1266         BUG_ON(!sent_skb);
1267         sent_skbs[txq->tail] = NULL;
1268
1269         /* skip header wrb */
1270         queue_tail_inc(txq);
1271
1272         do {
1273                 cur_index = txq->tail;
1274                 wrb = queue_tail_node(txq);
1275                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1276                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1277                 unmap_skb_hdr = false;
1278
1279                 num_wrbs++;
1280                 queue_tail_inc(txq);
1281         } while (cur_index != last_index);
1282
1283         atomic_sub(num_wrbs, &txq->used);
1284
1285         kfree_skb(sent_skb);
1286 }
1287
1288 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1289 {
1290         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1291
1292         if (!eqe->evt)
1293                 return NULL;
1294
1295         rmb();
1296         eqe->evt = le32_to_cpu(eqe->evt);
1297         queue_tail_inc(&eq_obj->q);
1298         return eqe;
1299 }
1300
1301 static int event_handle(struct be_adapter *adapter,
1302                         struct be_eq_obj *eq_obj)
1303 {
1304         struct be_eq_entry *eqe;
1305         u16 num = 0;
1306
1307         while ((eqe = event_get(eq_obj)) != NULL) {
1308                 eqe->evt = 0;
1309                 num++;
1310         }
1311
1312         /* Deal with any spurious interrupts that come
1313          * without events
1314          */
1315         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1316         if (num)
1317                 napi_schedule(&eq_obj->napi);
1318
1319         return num;
1320 }
1321
1322 /* Just read and notify events without processing them.
1323  * Used at the time of destroying event queues */
1324 static void be_eq_clean(struct be_adapter *adapter,
1325                         struct be_eq_obj *eq_obj)
1326 {
1327         struct be_eq_entry *eqe;
1328         u16 num = 0;
1329
1330         while ((eqe = event_get(eq_obj)) != NULL) {
1331                 eqe->evt = 0;
1332                 num++;
1333         }
1334
1335         if (num)
1336                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1337 }
1338
1339 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1340 {
1341         struct be_rx_page_info *page_info;
1342         struct be_queue_info *rxq = &rxo->q;
1343         struct be_queue_info *rx_cq = &rxo->cq;
1344         struct be_rx_compl_info *rxcp;
1345         u16 tail;
1346
1347         /* First cleanup pending rx completions */
1348         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1349                 be_rx_compl_discard(adapter, rxo, rxcp);
1350                 be_cq_notify(adapter, rx_cq->id, false, 1);
1351         }
1352
1353         /* Then free posted rx buffers that were not used */
1354         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1355         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1356                 page_info = get_rx_page_info(adapter, rxo, tail);
1357                 put_page(page_info->page);
1358                 memset(page_info, 0, sizeof(*page_info));
1359         }
1360         BUG_ON(atomic_read(&rxq->used));
1361 }
1362
1363 static void be_tx_compl_clean(struct be_adapter *adapter)
1364 {
1365         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1366         struct be_queue_info *txq = &adapter->tx_obj.q;
1367         struct be_eth_tx_compl *txcp;
1368         u16 end_idx, cmpl = 0, timeo = 0;
1369         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1370         struct sk_buff *sent_skb;
1371         bool dummy_wrb;
1372
1373         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1374         do {
1375                 while ((txcp = be_tx_compl_get(tx_cq))) {
1376                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1377                                         wrb_index, txcp);
1378                         be_tx_compl_process(adapter, end_idx);
1379                         cmpl++;
1380                 }
1381                 if (cmpl) {
1382                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1383                         cmpl = 0;
1384                 }
1385
1386                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1387                         break;
1388
1389                 mdelay(1);
1390         } while (true);
1391
1392         if (atomic_read(&txq->used))
1393                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1394                         atomic_read(&txq->used));
1395
1396         /* free posted tx for which compls will never arrive */
1397         while (atomic_read(&txq->used)) {
1398                 sent_skb = sent_skbs[txq->tail];
1399                 end_idx = txq->tail;
1400                 index_adv(&end_idx,
1401                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1402                         txq->len);
1403                 be_tx_compl_process(adapter, end_idx);
1404         }
1405 }
1406
1407 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1408 {
1409         struct be_queue_info *q;
1410
1411         q = &adapter->mcc_obj.q;
1412         if (q->created)
1413                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1414         be_queue_free(adapter, q);
1415
1416         q = &adapter->mcc_obj.cq;
1417         if (q->created)
1418                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1419         be_queue_free(adapter, q);
1420 }
1421
1422 /* Must be called only after TX qs are created as MCC shares TX EQ */
1423 static int be_mcc_queues_create(struct be_adapter *adapter)
1424 {
1425         struct be_queue_info *q, *cq;
1426
1427         /* Alloc MCC compl queue */
1428         cq = &adapter->mcc_obj.cq;
1429         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1430                         sizeof(struct be_mcc_compl)))
1431                 goto err;
1432
1433         /* Ask BE to create MCC compl queue; share TX's eq */
1434         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1435                 goto mcc_cq_free;
1436
1437         /* Alloc MCC queue */
1438         q = &adapter->mcc_obj.q;
1439         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1440                 goto mcc_cq_destroy;
1441
1442         /* Ask BE to create MCC queue */
1443         if (be_cmd_mccq_create(adapter, q, cq))
1444                 goto mcc_q_free;
1445
1446         return 0;
1447
1448 mcc_q_free:
1449         be_queue_free(adapter, q);
1450 mcc_cq_destroy:
1451         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1452 mcc_cq_free:
1453         be_queue_free(adapter, cq);
1454 err:
1455         return -1;
1456 }
1457
1458 static void be_tx_queues_destroy(struct be_adapter *adapter)
1459 {
1460         struct be_queue_info *q;
1461
1462         q = &adapter->tx_obj.q;
1463         if (q->created)
1464                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1465         be_queue_free(adapter, q);
1466
1467         q = &adapter->tx_obj.cq;
1468         if (q->created)
1469                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1470         be_queue_free(adapter, q);
1471
1472         /* Clear any residual events */
1473         be_eq_clean(adapter, &adapter->tx_eq);
1474
1475         q = &adapter->tx_eq.q;
1476         if (q->created)
1477                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1478         be_queue_free(adapter, q);
1479 }
1480
1481 static int be_tx_queues_create(struct be_adapter *adapter)
1482 {
1483         struct be_queue_info *eq, *q, *cq;
1484
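             /* the TX EQ runs with a fixed delay; adaptive coalescing is off */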
1485         adapter->tx_eq.max_eqd = 0;
1486         adapter->tx_eq.min_eqd = 0;
1487         adapter->tx_eq.cur_eqd = 96;
1488         adapter->tx_eq.enable_aic = false;
1489         /* Alloc Tx Event queue */
1490         eq = &adapter->tx_eq.q;
1491         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1492                 return -1;
1493
1494         /* Ask BE to create Tx Event queue */
1495         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1496                 goto tx_eq_free;
1497
1498         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1499
1501         /* Alloc TX eth compl queue */
1502         cq = &adapter->tx_obj.cq;
1503         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1504                         sizeof(struct be_eth_tx_compl)))
1505                 goto tx_eq_destroy;
1506
1507         /* Ask BE to create Tx eth compl queue */
1508         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1509                 goto tx_cq_free;
1510
1511         /* Alloc TX eth queue */
1512         q = &adapter->tx_obj.q;
1513         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1514                 goto tx_cq_destroy;
1515
1516         /* Ask BE to create Tx eth queue */
1517         if (be_cmd_txq_create(adapter, q, cq))
1518                 goto tx_q_free;
1519         return 0;
1520
1521 tx_q_free:
1522         be_queue_free(adapter, q);
1523 tx_cq_destroy:
1524         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1525 tx_cq_free:
1526         be_queue_free(adapter, cq);
1527 tx_eq_destroy:
1528         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1529 tx_eq_free:
1530         be_queue_free(adapter, eq);
1531         return -1;
1532 }
1533
1534 static void be_rx_queues_destroy(struct be_adapter *adapter)
1535 {
1536         struct be_queue_info *q;
1537         struct be_rx_obj *rxo;
1538         int i;
1539
1540         for_all_rx_queues(adapter, rxo, i) {
1541                 q = &rxo->q;
1542                 if (q->created) {
1543                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1544                         /* After the rxq is invalidated, wait for a grace time
1545                          * of 1ms for all dma to end and the flush compl to
1546                          * arrive
1547                          */
1548                         mdelay(1);
1549                         be_rx_q_clean(adapter, rxo);
1550                 }
1551                 be_queue_free(adapter, q);
1552
1553                 q = &rxo->cq;
1554                 if (q->created)
1555                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1556                 be_queue_free(adapter, q);
1557
1558                 /* Clear any residual events */
1559                 q = &rxo->rx_eq.q;
1560                 if (q->created) {
1561                         be_eq_clean(adapter, &rxo->rx_eq);
1562                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1563                 }
1564                 be_queue_free(adapter, q);
1565         }
1566 }
1567
1568 static int be_rx_queues_create(struct be_adapter *adapter)
1569 {
1570         struct be_queue_info *eq, *q, *cq;
1571         struct be_rx_obj *rxo;
1572         int rc, i;
1573
1574         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1575         for_all_rx_queues(adapter, rxo, i) {
1576                 rxo->adapter = adapter;
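                     /* RX EQs use adaptive interrupt coalescing, capped at BE_MAX_EQD */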
1577                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1578                 rxo->rx_eq.enable_aic = true;
1579
1580                 /* EQ */
1581                 eq = &rxo->rx_eq.q;
1582                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1583                                         sizeof(struct be_eq_entry));
1584                 if (rc)
1585                         goto err;
1586
1587                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1588                 if (rc)
1589                         goto err;
1590
1591                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1592
1593                 /* CQ */
1594                 cq = &rxo->cq;
1595                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1596                                 sizeof(struct be_eth_rx_compl));
1597                 if (rc)
1598                         goto err;
1599
1600                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1601                 if (rc)
1602                         goto err;
1603                 /* Rx Q */
1604                 q = &rxo->q;
1605                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1606                                 sizeof(struct be_eth_rx_d));
1607                 if (rc)
1608                         goto err;
1609
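                     /* queue 0 is the default non-RSS queue; RSS is enabled on the rest */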
1610                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1611                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1612                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1613                 if (rc)
1614                         goto err;
1615         }
1616
1617         if (be_multi_rxq(adapter)) {
1618                 u8 rsstable[MAX_RSS_QS];
1619
1620                 for_all_rss_queues(adapter, rxo, i)
1621                         rsstable[i] = rxo->rss_id;
1622
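                     /* hand fw the rss_ids of the RSS-capable queues (all but queue 0) */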
1623                 rc = be_cmd_rss_config(adapter, rsstable,
1624                         adapter->num_rx_qs - 1);
1625                 if (rc)
1626                         goto err;
1627         }
1628
1629         return 0;
1630 err:
1631         be_rx_queues_destroy(adapter);
1632         return -1;
1633 }
1634
1635 static bool event_peek(struct be_eq_obj *eq_obj)
1636 {
1637         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1638         return eqe->evt != 0;
1642 }
1643
1644 static irqreturn_t be_intx(int irq, void *dev)
1645 {
1646         struct be_adapter *adapter = dev;
1647         struct be_rx_obj *rxo;
1648         int isr, i, tx = 0, rx = 0;
1649
1650         if (lancer_chip(adapter)) {
1651                 if (event_peek(&adapter->tx_eq))
1652                         tx = event_handle(adapter, &adapter->tx_eq);
1653                 for_all_rx_queues(adapter, rxo, i) {
1654                         if (event_peek(&rxo->rx_eq))
1655                                 rx |= event_handle(adapter, &rxo->rx_eq);
1656                 }
1657
1658                 if (!(tx || rx))
1659                         return IRQ_NONE;
1660
1661         } else {
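                     /* BE2/BE3 INTx: read the interrupt status register covering our EQs */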
1662                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1663                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1664                 if (!isr)
1665                         return IRQ_NONE;
1666
1667                 if ((1 << adapter->tx_eq.msix_vec_idx) & isr)
1668                         event_handle(adapter, &adapter->tx_eq);
1669
1670                 for_all_rx_queues(adapter, rxo, i) {
1671                         if ((1 << rxo->rx_eq.msix_vec_idx) & isr)
1672                                 event_handle(adapter, &rxo->rx_eq);
1673                 }
1674         }
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 static irqreturn_t be_msix_rx(int irq, void *dev)
1680 {
1681         struct be_rx_obj *rxo = dev;
1682         struct be_adapter *adapter = rxo->adapter;
1683
1684         event_handle(adapter, &rxo->rx_eq);
1685
1686         return IRQ_HANDLED;
1687 }
1688
1689 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1690 {
1691         struct be_adapter *adapter = dev;
1692
1693         event_handle(adapter, &adapter->tx_eq);
1694
1695         return IRQ_HANDLED;
1696 }
1697
1698 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1699 {
1700         return rxcp->tcpf && !rxcp->err;
1701 }
1702
1703 static int be_poll_rx(struct napi_struct *napi, int budget)
1704 {
1705         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1706         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1707         struct be_adapter *adapter = rxo->adapter;
1708         struct be_queue_info *rx_cq = &rxo->cq;
1709         struct be_rx_compl_info *rxcp;
1710         u32 work_done;
1711
1712         rxo->stats.rx_polls++;
1713         for (work_done = 0; work_done < budget; work_done++) {
1714                 rxcp = be_rx_compl_get(rxo);
1715                 if (!rxcp)
1716                         break;
1717
1718                 /* Ignore flush completions */
1719                 if (rxcp->num_rcvd) {
1720                         if (do_gro(rxcp))
1721                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1722                         else
1723                                 be_rx_compl_process(adapter, rxo, rxcp);
1724                 }
1725                 be_rx_stats_update(rxo, rxcp);
1726         }
1727
1728         /* Refill the queue */
1729         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1730                 be_post_rx_frags(rxo, GFP_ATOMIC);
1731
1732         /* All consumed */
1733         if (work_done < budget) {
1734                 napi_complete(napi);
1735                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1736         } else {
1737                 /* More to be consumed; continue with interrupts disabled */
1738                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1739         }
1740         return work_done;
1741 }
1742
1743 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1744  * For TX/MCC we don't honour the budget; consume everything.
1745  */
1746 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1747 {
1748         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1749         struct be_adapter *adapter =
1750                 container_of(tx_eq, struct be_adapter, tx_eq);
1751         struct be_queue_info *txq = &adapter->tx_obj.q;
1752         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1753         struct be_eth_tx_compl *txcp;
1754         int tx_compl = 0, mcc_compl, status = 0;
1755         u16 end_idx;
1756
1757         while ((txcp = be_tx_compl_get(tx_cq))) {
1758                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1759                                 wrb_index, txcp);
1760                 be_tx_compl_process(adapter, end_idx);
1761                 tx_compl++;
1762         }
1763
1764         mcc_compl = be_process_mcc(adapter, &status);
1765
1766         napi_complete(napi);
1767
1768         if (mcc_compl) {
1769                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1770                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1771         }
1772
1773         if (tx_compl) {
1774                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1775
1776                 /* As Tx wrbs have been freed up, wake up netdev queue if
1777                  * it was stopped due to lack of tx wrbs.
1778                  */
1779                 if (netif_queue_stopped(adapter->netdev) &&
1780                         atomic_read(&txq->used) < txq->len / 2) {
1781                         netif_wake_queue(adapter->netdev);
1782                 }
1783
1784                 tx_stats(adapter)->be_tx_events++;
1785                 tx_stats(adapter)->be_tx_compl += tx_compl;
1786         }
1787
1788         return 1;
1789 }
1790
1791 void be_detect_dump_ue(struct be_adapter *adapter)
1792 {
1793         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1794         u32 i;
1795
1796         pci_read_config_dword(adapter->pdev,
1797                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1798         pci_read_config_dword(adapter->pdev,
1799                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1800         pci_read_config_dword(adapter->pdev,
1801                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1802         pci_read_config_dword(adapter->pdev,
1803                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1804
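             /* ignore error bits that the respective mask registers disable */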
1805         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1806         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1807
1808         if (ue_status_lo || ue_status_hi) {
1809                 adapter->ue_detected = true;
1810                 adapter->eeh_err = true;
1811                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1812         }
1813
1814         if (ue_status_lo) {
1815                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1816                         if (ue_status_lo & 1)
1817                                 dev_err(&adapter->pdev->dev,
1818                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1819                 }
1820         }
1821         if (ue_status_hi) {
1822                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1823                         if (ue_status_hi & 1)
1824                                 dev_err(&adapter->pdev->dev,
1825                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1826                 }
1827         }
1829 }
1830
1831 static void be_worker(struct work_struct *work)
1832 {
1833         struct be_adapter *adapter =
1834                 container_of(work, struct be_adapter, work.work);
1835         struct be_rx_obj *rxo;
1836         int i;
1837
1838         /* when interrupts are not yet enabled, just reap any pending
1839          * mcc completions */
1840         if (!netif_running(adapter->netdev)) {
1841                 int mcc_compl, status = 0;
1842
1843                 mcc_compl = be_process_mcc(adapter, &status);
1844
1845                 if (mcc_compl) {
1846                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1847                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1848                 }
1849
1850                 if (!adapter->ue_detected && !lancer_chip(adapter))
1851                         be_detect_dump_ue(adapter);
1852
1853                 goto reschedule;
1854         }
1855
1856         if (!adapter->stats_cmd_sent)
1857                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1858
1859         be_tx_rate_update(adapter);
1860
1861         for_all_rx_queues(adapter, rxo, i) {
1862                 be_rx_rate_update(rxo);
1863                 be_rx_eqd_update(adapter, rxo);
1864
1865                 if (rxo->rx_post_starved) {
1866                         rxo->rx_post_starved = false;
1867                         be_post_rx_frags(rxo, GFP_KERNEL);
1868                 }
1869         }
1870         if (!adapter->ue_detected && !lancer_chip(adapter))
1871                 be_detect_dump_ue(adapter);
1872
1873 reschedule:
1874         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1875 }
1876
1877 static void be_msix_disable(struct be_adapter *adapter)
1878 {
1879         if (adapter->msix_enabled) {
1880                 pci_disable_msix(adapter->pdev);
1881                 adapter->msix_enabled = false;
1882         }
1883 }
1884
1885 static int be_num_rxqs_get(struct be_adapter *adapter)
1886 {
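             /* use multiple RX queues only with fw RSS support, no SR-IOV and
              * a function mode without bit 0x400 set */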
1887         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1888                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1889                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1890         } else {
1891                 dev_warn(&adapter->pdev->dev,
1892                         "No support for multiple RX queues\n");
1893                 return 1;
1894         }
1895 }
1896
1897 static void be_msix_enable(struct be_adapter *adapter)
1898 {
1899 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1900         int i, status;
1901
1902         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1903
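             /* one vector per RX queue plus one shared by TX and MCC */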
1904         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1905                 adapter->msix_entries[i].entry = i;
1906
1907         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1908                         adapter->num_rx_qs + 1);
1909         if (status == 0) {
1910                 goto done;
1911         } else if (status >= BE_MIN_MSIX_VECTORS) {
1912                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1913                                 status) == 0) {
1914                         adapter->num_rx_qs = status - 1;
1915                         dev_warn(&adapter->pdev->dev,
1916                                 "Could allocate only %d MSIx vectors. "
1917                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1918                         goto done;
1919                 }
1920         }
1921         return;
1922 done:
1923         adapter->msix_enabled = true;
1924 }
1925
1926 static void be_sriov_enable(struct be_adapter *adapter)
1927 {
1928         be_check_sriov_fn_type(adapter);
1929 #ifdef CONFIG_PCI_IOV
1930         if (be_physfn(adapter) && num_vfs) {
1931                 int status;
1932
1933                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1934                 adapter->sriov_enabled = !status;
1935         }
1936 #endif
1937 }
1938
1939 static void be_sriov_disable(struct be_adapter *adapter)
1940 {
1941 #ifdef CONFIG_PCI_IOV
1942         if (adapter->sriov_enabled) {
1943                 pci_disable_sriov(adapter->pdev);
1944                 adapter->sriov_enabled = false;
1945         }
1946 #endif
1947 }
1948
1949 static inline int be_msix_vec_get(struct be_adapter *adapter,
1950                                         struct be_eq_obj *eq_obj)
1951 {
1952         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1953 }
1954
1955 static int be_request_irq(struct be_adapter *adapter,
1956                 struct be_eq_obj *eq_obj,
1957                 void *handler, char *desc, void *context)
1958 {
1959         struct net_device *netdev = adapter->netdev;
1960         int vec;
1961
1962         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1963         vec = be_msix_vec_get(adapter, eq_obj);
1964         return request_irq(vec, handler, 0, eq_obj->desc, context);
1965 }
1966
1967 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1968                         void *context)
1969 {
1970         int vec = be_msix_vec_get(adapter, eq_obj);
1971         free_irq(vec, context);
1972 }
1973
1974 static int be_msix_register(struct be_adapter *adapter)
1975 {
1976         struct be_rx_obj *rxo;
1977         int status, i;
1978         char qname[10];
1979
1980         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1981                                 adapter);
1982         if (status)
1983                 goto err;
1984
1985         for_all_rx_queues(adapter, rxo, i) {
1986                 sprintf(qname, "rxq%d", i);
1987                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1988                                 qname, rxo);
1989                 if (status)
1990                         goto err_msix;
1991         }
1992
1993         return 0;
1994
1995 err_msix:
1996         be_free_irq(adapter, &adapter->tx_eq, adapter);
1997
1998         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1999                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2000
2001 err:
2002         dev_warn(&adapter->pdev->dev,
2003                 "MSIX Request IRQ failed - err %d\n", status);
2004         pci_disable_msix(adapter->pdev);
2005         adapter->msix_enabled = false;
2006         return status;
2007 }
2008
2009 static int be_irq_register(struct be_adapter *adapter)
2010 {
2011         struct net_device *netdev = adapter->netdev;
2012         int status;
2013
2014         if (adapter->msix_enabled) {
2015                 status = be_msix_register(adapter);
2016                 if (status == 0)
2017                         goto done;
2018                 /* INTx is not supported for VF */
2019                 if (!be_physfn(adapter))
2020                         return status;
2021         }
2022
2023         /* INTx */
2024         netdev->irq = adapter->pdev->irq;
2025         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2026                         adapter);
2027         if (status) {
2028                 dev_err(&adapter->pdev->dev,
2029                         "INTx request IRQ failed - err %d\n", status);
2030                 return status;
2031         }
2032 done:
2033         adapter->isr_registered = true;
2034         return 0;
2035 }
2036
2037 static void be_irq_unregister(struct be_adapter *adapter)
2038 {
2039         struct net_device *netdev = adapter->netdev;
2040         struct be_rx_obj *rxo;
2041         int i;
2042
2043         if (!adapter->isr_registered)
2044                 return;
2045
2046         /* INTx */
2047         if (!adapter->msix_enabled) {
2048                 free_irq(netdev->irq, adapter);
2049                 goto done;
2050         }
2051
2052         /* MSIx */
2053         be_free_irq(adapter, &adapter->tx_eq, adapter);
2054
2055         for_all_rx_queues(adapter, rxo, i)
2056                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2057
2058 done:
2059         adapter->isr_registered = false;
2060 }
2061
2062 static int be_close(struct net_device *netdev)
2063 {
2064         struct be_adapter *adapter = netdev_priv(netdev);
2065         struct be_rx_obj *rxo;
2066         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2067         int vec, i;
2068
2069         be_async_mcc_disable(adapter);
2070
2071         netif_carrier_off(netdev);
2072         adapter->link_up = false;
2073
2074         if (!lancer_chip(adapter))
2075                 be_intr_set(adapter, false);
2076
2077         for_all_rx_queues(adapter, rxo, i)
2078                 napi_disable(&rxo->rx_eq.napi);
2079
2080         napi_disable(&tx_eq->napi);
2081
2082         if (lancer_chip(adapter)) {
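                     /* Lancer: leave the CQs unarmed so no further events are raised */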
2083                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2084                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2085                 for_all_rx_queues(adapter, rxo, i)
2086                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2087         }
2088
2089         if (adapter->msix_enabled) {
2090                 vec = be_msix_vec_get(adapter, tx_eq);
2091                 synchronize_irq(vec);
2092
2093                 for_all_rx_queues(adapter, rxo, i) {
2094                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2095                         synchronize_irq(vec);
2096                 }
2097         } else {
2098                 synchronize_irq(netdev->irq);
2099         }
2100         be_irq_unregister(adapter);
2101
2102         /* Wait for all pending tx completions to arrive so that
2103          * all tx skbs are freed.
2104          */
2105         be_tx_compl_clean(adapter);
2106
2107         return 0;
2108 }
2109
2110 static int be_open(struct net_device *netdev)
2111 {
2112         struct be_adapter *adapter = netdev_priv(netdev);
2113         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2114         struct be_rx_obj *rxo;
2115         bool link_up;
2116         int status, i;
2117         u8 mac_speed;
2118         u16 link_speed;
2119
2120         for_all_rx_queues(adapter, rxo, i) {
2121                 be_post_rx_frags(rxo, GFP_KERNEL);
2122                 napi_enable(&rxo->rx_eq.napi);
2123         }
2124         napi_enable(&tx_eq->napi);
2125
2126         be_irq_register(adapter);
2127
2128         if (!lancer_chip(adapter))
2129                 be_intr_set(adapter, true);
2130
2131         /* The evt queues are created in unarmed state; arm them */
2132         for_all_rx_queues(adapter, rxo, i) {
2133                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2134                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2135         }
2136         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2137
2138         /* Now that interrupts are on we can process async mcc */
2139         be_async_mcc_enable(adapter);
2140
2141         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2142                         &link_speed);
2143         if (status)
2144                 goto err;
2145         be_link_status_update(adapter, link_up);
2146
2147         if (be_physfn(adapter)) {
2148                 status = be_vid_config(adapter, false, 0);
2149                 if (status)
2150                         goto err;
2151
2152                 status = be_cmd_set_flow_control(adapter,
2153                                 adapter->tx_fc, adapter->rx_fc);
2154                 if (status)
2155                         goto err;
2156         }
2157
2158         return 0;
2159 err:
2160         be_close(adapter->netdev);
2161         return -EIO;
2162 }
2163
2164 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2165 {
2166         struct be_dma_mem cmd;
2167         int status = 0;
2168         u8 mac[ETH_ALEN];
2169
2170         memset(mac, 0, ETH_ALEN);
2171
2172         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2173         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2174                                     GFP_KERNEL);
2175         if (cmd.va == NULL)
2176                 return -1;
2177         memset(cmd.va, 0, cmd.size);
2178
2179         if (enable) {
2180                 status = pci_write_config_dword(adapter->pdev,
2181                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2182                 if (status) {
2183                         dev_err(&adapter->pdev->dev,
2184                                 "Could not enable Wake-on-LAN\n");
2185                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2186                                           cmd.dma);
2187                         return status;
2188                 }
2189                 status = be_cmd_enable_magic_wol(adapter,
2190                                 adapter->netdev->dev_addr, &cmd);
2191                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2192                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2193         } else {
2194                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2195                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2196                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2197         }
2198
2199         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2200         return status;
2201 }
2202
2203 /*
2204  * Generate a seed MAC address from the PF MAC address using jhash.
2205  * MAC addresses for the VFs are assigned incrementally, starting from the
2206  * seed. These addresses are programmed into the ASIC by the PF, and each VF
2207  * driver queries for its MAC address during its probe.
2208  */
2209 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2210 {
2211         u32 vf = 0;
2212         int status = 0;
2213         u8 mac[ETH_ALEN];
2214
2215         be_vf_eth_addr_generate(adapter, mac);
2216
2217         for (vf = 0; vf < num_vfs; vf++) {
2218                 status = be_cmd_pmac_add(adapter, mac,
2219                                         adapter->vf_cfg[vf].vf_if_handle,
2220                                         &adapter->vf_cfg[vf].vf_pmac_id,
2221                                         vf + 1);
2222                 if (status)
2223                         dev_err(&adapter->pdev->dev,
2224                                 "Mac address add failed for VF %d\n", vf);
2225                 else
2226                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2227
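                     /* give the next VF the next address: bump only the last octet */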
2228                 mac[5] += 1;
2229         }
2230         return status;
2231 }
2232
2233 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2234 {
2235         u32 vf;
2236
2237         for (vf = 0; vf < num_vfs; vf++) {
2238                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2239                         be_cmd_pmac_del(adapter,
2240                                         adapter->vf_cfg[vf].vf_if_handle,
2241                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2242         }
2243 }
2244
2245 static int be_setup(struct be_adapter *adapter)
2246 {
2247         struct net_device *netdev = adapter->netdev;
2248         u32 cap_flags, en_flags, vf = 0;
2249         int status;
2250         u8 mac[ETH_ALEN];
2251
2252         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2253                                 BE_IF_FLAGS_BROADCAST |
2254                                 BE_IF_FLAGS_MULTICAST;
2255
2256         if (be_physfn(adapter)) {
2257                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2258                                 BE_IF_FLAGS_PROMISCUOUS |
2259                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2260                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2261
2262                 if (be_multi_rxq(adapter)) {
2263                         cap_flags |= BE_IF_FLAGS_RSS;
2264                         en_flags |= BE_IF_FLAGS_RSS;
2265                 }
2266         }
2267
2268         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2269                         netdev->dev_addr, false/* pmac_invalid */,
2270                         &adapter->if_handle, &adapter->pmac_id, 0);
2271         if (status != 0)
2272                 goto do_none;
2273
2274         if (be_physfn(adapter)) {
2275                 if (adapter->sriov_enabled) {
2276                         while (vf < num_vfs) {
2277                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2278                                                         BE_IF_FLAGS_BROADCAST;
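                                                     /* pmac_invalid == true, so the (uninitialized) mac arg is ignored */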
2279                                 status = be_cmd_if_create(adapter, cap_flags,
2280                                         en_flags, mac, true,
2281                                         &adapter->vf_cfg[vf].vf_if_handle,
2282                                         NULL, vf+1);
2283                                 if (status) {
2284                                         dev_err(&adapter->pdev->dev,
2285                                         "Interface Create failed for VF %d\n",
2286                                         vf);
2287                                         goto if_destroy;
2288                                 }
2289                                 adapter->vf_cfg[vf].vf_pmac_id =
2290                                                         BE_INVALID_PMAC_ID;
2291                                 vf++;
2292                         }
2293                 }
2294         } else {
2295                 status = be_cmd_mac_addr_query(adapter, mac,
2296                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2297                 if (!status) {
2298                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2299                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2300                 }
2301         }
2302
2303         status = be_tx_queues_create(adapter);
2304         if (status != 0)
2305                 goto if_destroy;
2306
2307         status = be_rx_queues_create(adapter);
2308         if (status != 0)
2309                 goto tx_qs_destroy;
2310
2311         status = be_mcc_queues_create(adapter);
2312         if (status != 0)
2313                 goto rx_qs_destroy;
2314
2315         adapter->link_speed = -1;
2316
2317         return 0;
2318
2320 rx_qs_destroy:
2321         be_rx_queues_destroy(adapter);
2322 tx_qs_destroy:
2323         be_tx_queues_destroy(adapter);
2324 if_destroy:
2325         if (be_physfn(adapter) && adapter->sriov_enabled)
2326                 for (vf = 0; vf < num_vfs; vf++)
2327                         if (adapter->vf_cfg[vf].vf_if_handle)
2328                                 be_cmd_if_destroy(adapter,
2329                                         adapter->vf_cfg[vf].vf_if_handle,
2330                                         vf + 1);
2331         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2332 do_none:
2333         return status;
2334 }
2335
2336 static int be_clear(struct be_adapter *adapter)
2337 {
2338         int vf;
2339
2340         if (be_physfn(adapter) && adapter->sriov_enabled)
2341                 be_vf_eth_addr_rem(adapter);
2342
2343         be_mcc_queues_destroy(adapter);
2344         be_rx_queues_destroy(adapter);
2345         be_tx_queues_destroy(adapter);
2346
2347         if (be_physfn(adapter) && adapter->sriov_enabled)
2348                 for (vf = 0; vf < num_vfs; vf++)
2349                         if (adapter->vf_cfg[vf].vf_if_handle)
2350                                 be_cmd_if_destroy(adapter,
2351                                         adapter->vf_cfg[vf].vf_if_handle,
2352                                         vf + 1);
2353
2354         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2355
2356         /* tell fw we're done with firing cmds */
2357         be_cmd_fw_clean(adapter);
2358         return 0;
2359 }
2360
2362 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2363 static bool be_flash_redboot(struct be_adapter *adapter,
2364                         const u8 *p, u32 img_start, int image_size,
2365                         int hdr_size)
2366 {
2367         u32 crc_offset;
2368         u8 flashed_crc[4];
2369         int status;
2370
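             /* the image's CRC lives in its last 4 bytes on flash */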
2371         crc_offset = hdr_size + img_start + image_size - 4;
2372
2373         p += crc_offset;
2374
2375         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2376                         (image_size - 4));
2377         if (status) {
2378                 dev_err(&adapter->pdev->dev,
2379                 "could not get crc from flash, not flashing redboot\n");
2380                 return false;
2381         }
2382
2383         /* update redboot only if the CRC does not match */
2384         return memcmp(flashed_crc, p, 4) != 0;
2388 }
2389
2390 static int be_flash_data(struct be_adapter *adapter,
2391                         const struct firmware *fw,
2392                         struct be_dma_mem *flash_cmd, int num_of_images)
2393
2394 {
2395         int status = 0, i, filehdr_size = 0;
2396         u32 total_bytes = 0, flash_op;
2397         int num_bytes;
2398         const u8 *p = fw->data;
2399         struct be_cmd_write_flashrom *req = flash_cmd->va;
2400         const struct flash_comp *pflashcomp;
2401         int num_comp;
2402
2403         static const struct flash_comp gen3_flash_types[9] = {
2404                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2405                         FLASH_IMAGE_MAX_SIZE_g3},
2406                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2407                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2408                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2409                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2410                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2411                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2412                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2413                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2414                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2415                         FLASH_IMAGE_MAX_SIZE_g3},
2416                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2417                         FLASH_IMAGE_MAX_SIZE_g3},
2418                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2419                         FLASH_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2421                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2422         };
2423         static const struct flash_comp gen2_flash_types[8] = {
2424                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2425                         FLASH_IMAGE_MAX_SIZE_g2},
2426                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2427                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2428                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2429                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2430                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2431                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2432                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2433                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2434                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2435                         FLASH_IMAGE_MAX_SIZE_g2},
2436                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2437                         FLASH_IMAGE_MAX_SIZE_g2},
2438                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2439                          FLASH_IMAGE_MAX_SIZE_g2}
2440         };
2441
2442         if (adapter->generation == BE_GEN3) {
2443                 pflashcomp = gen3_flash_types;
2444                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2445                 num_comp = ARRAY_SIZE(gen3_flash_types);
2446         } else {
2447                 pflashcomp = gen2_flash_types;
2448                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2449                 num_comp = ARRAY_SIZE(gen2_flash_types);
2450         }
2451         for (i = 0; i < num_comp; i++) {
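                     /* the NC-SI image is flashed only with fw versions 3.102.148.0 and newer */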
2452                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2453                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2454                         continue;
2455                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2456                         (!be_flash_redboot(adapter, fw->data,
2457                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2458                         (num_of_images * sizeof(struct image_hdr)))))
2459                         continue;
2460                 p = fw->data;
2461                 p += filehdr_size + pflashcomp[i].offset
2462                         + (num_of_images * sizeof(struct image_hdr));
2463                 if (p + pflashcomp[i].size > fw->data + fw->size)
2464                         return -1;
2465                 total_bytes = pflashcomp[i].size;
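                     /* flash in 32KB chunks; intermediate chunks use the SAVE op,
                      * the final chunk issues the actual FLASH op */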
2466                 while (total_bytes) {
2467                         if (total_bytes > 32*1024)
2468                                 num_bytes = 32*1024;
2469                         else
2470                                 num_bytes = total_bytes;
2471                         total_bytes -= num_bytes;
2472
2473                         if (!total_bytes)
2474                                 flash_op = FLASHROM_OPER_FLASH;
2475                         else
2476                                 flash_op = FLASHROM_OPER_SAVE;
2477                         memcpy(req->params.data_buf, p, num_bytes);
2478                         p += num_bytes;
2479                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2480                                 pflashcomp[i].optype, flash_op, num_bytes);
2481                         if (status) {
2482                                 dev_err(&adapter->pdev->dev,
2483                                         "cmd to write to flash rom failed.\n");
2484                                 return -1;
2485                         }
2486                         yield();
2487                 }
2488         }
2489         return 0;
2490 }
2491
2492 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2493 {
2494         if (fhdr == NULL)
2495                 return 0;
2496         if (fhdr->build[0] == '3')
2497                 return BE_GEN3;
2498         else if (fhdr->build[0] == '2')
2499                 return BE_GEN2;
2500         else
2501                 return 0;
2502 }
2503
2504 int be_load_fw(struct be_adapter *adapter, u8 *func)
2505 {
2506         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2507         const struct firmware *fw;
2508         struct flash_file_hdr_g2 *fhdr;
2509         struct flash_file_hdr_g3 *fhdr3;
2510         struct image_hdr *img_hdr_ptr = NULL;
2511         struct be_dma_mem flash_cmd;
2512         int status, i = 0, num_imgs = 0;
2513         const u8 *p;
2514
2515         if (!netif_running(adapter->netdev)) {
2516                 dev_err(&adapter->pdev->dev,
2517                         "Firmware load not allowed (interface is down)\n");
2518                 return -EPERM;
2519         }
2520
2521         strcpy(fw_file, func);
2522
2523         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2524         if (status)
2525                 goto fw_exit;
2526
2527         p = fw->data;
2528         fhdr = (struct flash_file_hdr_g2 *) p;
2529         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2530
2531         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2532         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2533                                           &flash_cmd.dma, GFP_KERNEL);
2534         if (!flash_cmd.va) {
2535                 status = -ENOMEM;
2536                 dev_err(&adapter->pdev->dev,
2537                         "Memory allocation failure while flashing\n");
2538                 goto fw_exit;
2539         }
2540
2541         if ((adapter->generation == BE_GEN3) &&
2542                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2543                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2544                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2545                 for (i = 0; i < num_imgs; i++) {
2546                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2547                                         (sizeof(struct flash_file_hdr_g3) +
2548                                          i * sizeof(struct image_hdr)));
2549                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2550                                 status = be_flash_data(adapter, fw, &flash_cmd,
2551                                                         num_imgs);
2552                 }
2553         } else if ((adapter->generation == BE_GEN2) &&
2554                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2555                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2556         } else {
2557                 dev_err(&adapter->pdev->dev,
2558                         "UFI and Interface are not compatible for flashing\n");
2559                 status = -1;
2560         }
2561
2562         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2563                           flash_cmd.dma);
2564         if (status) {
2565                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2566                 goto fw_exit;
2567         }
2568
2569         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2570
2571 fw_exit:
2572         release_firmware(fw);
2573         return status;
2574 }
2575
2576 static const struct net_device_ops be_netdev_ops = {
2577         .ndo_open               = be_open,
2578         .ndo_stop               = be_close,
2579         .ndo_start_xmit         = be_xmit,
2580         .ndo_set_rx_mode        = be_set_multicast_list,
2581         .ndo_set_mac_address    = be_mac_addr_set,
2582         .ndo_change_mtu         = be_change_mtu,
2583         .ndo_validate_addr      = eth_validate_addr,
2584         .ndo_vlan_rx_register   = be_vlan_register,
2585         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2586         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2587         .ndo_set_vf_mac         = be_set_vf_mac,
2588         .ndo_set_vf_vlan        = be_set_vf_vlan,
2589         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2590         .ndo_get_vf_config      = be_get_vf_config,
2591 };
2592
2593 static void be_netdev_init(struct net_device *netdev)
2594 {
2595         struct be_adapter *adapter = netdev_priv(netdev);
2596         struct be_rx_obj *rxo;
2597         int i;
2598
2599         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2600                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2601                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2602                 NETIF_F_GRO | NETIF_F_TSO6;
2603
2604         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2605                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2606
2607         if (lancer_chip(adapter))
2608                 netdev->vlan_features |= NETIF_F_TSO6;
2609
2610         netdev->flags |= IFF_MULTICAST;
2611
2612         adapter->rx_csum = true;
2613
2614         /* Default settings for Rx and Tx flow control */
2615         adapter->rx_fc = true;
2616         adapter->tx_fc = true;
2617
2618         netif_set_gso_max_size(netdev, 65535);
2619
2620         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2621
2622         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2623
2624         for_all_rx_queues(adapter, rxo, i)
2625                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2626                                 BE_NAPI_WEIGHT);
2627
2628         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2629                 BE_NAPI_WEIGHT);
2630 }
2631
2632 static void be_unmap_pci_bars(struct be_adapter *adapter)
2633 {
2634         if (adapter->csr)
2635                 iounmap(adapter->csr);
2636         if (adapter->db)
2637                 iounmap(adapter->db);
2638         if (adapter->pcicfg && be_physfn(adapter))
2639                 iounmap(adapter->pcicfg);
2640 }
2641
2642 static int be_map_pci_bars(struct be_adapter *adapter)
2643 {
2644         u8 __iomem *addr;
2645         int pcicfg_reg, db_reg;
2646
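             /* on Lancer only BAR 0, which holds the doorbells, is mapped */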
2647         if (lancer_chip(adapter)) {
2648                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2649                         pci_resource_len(adapter->pdev, 0));
2650                 if (addr == NULL)
2651                         return -ENOMEM;
2652                 adapter->db = addr;
2653                 return 0;
2654         }
2655
2656         if (be_physfn(adapter)) {
2657                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2658                                 pci_resource_len(adapter->pdev, 2));
2659                 if (addr == NULL)
2660                         return -ENOMEM;
2661                 adapter->csr = addr;
2662         }
2663
2664         if (adapter->generation == BE_GEN2) {
2665                 pcicfg_reg = 1;
2666                 db_reg = 4;
2667         } else {
2668                 pcicfg_reg = 0;
2669                 if (be_physfn(adapter))
2670                         db_reg = 4;
2671                 else
2672                         db_reg = 0;
2673         }
2674         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2675                                 pci_resource_len(adapter->pdev, db_reg));
2676         if (addr == NULL)
2677                 goto pci_map_err;
2678         adapter->db = addr;
2679
2680         if (be_physfn(adapter)) {
2681                 addr = ioremap_nocache(
2682                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2683                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2684                 if (addr == NULL)
2685                         goto pci_map_err;
2686                 adapter->pcicfg = addr;
2687         } else
2688                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2689
2690         return 0;
2691 pci_map_err:
2692         be_unmap_pci_bars(adapter);
2693         return -ENOMEM;
2694 }
2695
2697 static void be_ctrl_cleanup(struct be_adapter *adapter)
2698 {
2699         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2700
2701         be_unmap_pci_bars(adapter);
2702
2703         if (mem->va)
2704                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2705                                   mem->dma);
2706
2707         mem = &adapter->mc_cmd_mem;
2708         if (mem->va)
2709                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2710                                   mem->dma);
2711 }
2712
2713 static int be_ctrl_init(struct be_adapter *adapter)
2714 {
2715         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2716         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2717         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2718         int status;
2719
2720         status = be_map_pci_bars(adapter);
2721         if (status)
2722                 goto done;
2723
2724         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2725         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2726                                                 mbox_mem_alloc->size,
2727                                                 &mbox_mem_alloc->dma,
2728                                                 GFP_KERNEL);
2729         if (!mbox_mem_alloc->va) {
2730                 status = -ENOMEM;
2731                 goto unmap_pci_bars;
2732         }
2733
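             /* the mailbox must be 16-byte aligned; carve an aligned region
              * out of the over-sized allocation */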
2734         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2735         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2736         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2737         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2738
2739         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2740         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2741                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2742                                             GFP_KERNEL);
2743         if (mc_cmd_mem->va == NULL) {
2744                 status = -ENOMEM;
2745                 goto free_mbox;
2746         }
2747         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2748
2749         mutex_init(&adapter->mbox_lock);
2750         spin_lock_init(&adapter->mcc_lock);
2751         spin_lock_init(&adapter->mcc_cq_lock);
2752
2753         init_completion(&adapter->flash_compl);
2754         pci_save_state(adapter->pdev);
2755         return 0;
2756
2757 free_mbox:
2758         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2759                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2760
2761 unmap_pci_bars:
2762         be_unmap_pci_bars(adapter);
2763
2764 done:
2765         return status;
2766 }
2767
2768 static void be_stats_cleanup(struct be_adapter *adapter)
2769 {
2770         struct be_dma_mem *cmd = &adapter->stats_cmd;
2771
2772         if (cmd->va)
2773                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2774                                   cmd->va, cmd->dma);
2775 }
2776
2777 static int be_stats_init(struct be_adapter *adapter)
2778 {
2779         struct be_dma_mem *cmd = &adapter->stats_cmd;
2780
2781         cmd->size = sizeof(struct be_cmd_req_get_stats);
2782         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2783                                      GFP_KERNEL);
2784         if (cmd->va == NULL)
2785                 return -1;
2786         memset(cmd->va, 0, cmd->size);
2787         return 0;
2788 }
2789
2790 static void __devexit be_remove(struct pci_dev *pdev)
2791 {
2792         struct be_adapter *adapter = pci_get_drvdata(pdev);
2793
2794         if (!adapter)
2795                 return;
2796
2797         cancel_delayed_work_sync(&adapter->work);
2798
2799         unregister_netdev(adapter->netdev);
2800
2801         be_clear(adapter);
2802
2803         be_stats_cleanup(adapter);
2804
2805         be_ctrl_cleanup(adapter);
2806
2807         be_sriov_disable(adapter);
2808
2809         be_msix_disable(adapter);
2810
2811         pci_set_drvdata(pdev, NULL);
2812         pci_release_regions(pdev);
2813         pci_disable_device(pdev);
2814
2815         free_netdev(adapter->netdev);
2816 }
2817
2818 static int be_get_config(struct be_adapter *adapter)
2819 {
2820         int status;
2821         u8 mac[ETH_ALEN];
2822
2823         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2824         if (status)
2825                 return status;
2826
2827         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2828                         &adapter->function_mode, &adapter->function_caps);
2829         if (status)
2830                 return status;
2831
2832         memset(mac, 0, ETH_ALEN);
2833
2834         if (be_physfn(adapter)) {
2835                 status = be_cmd_mac_addr_query(adapter, mac,
2836                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2837
2838                 if (status)
2839                         return status;
2840
2841                 if (!is_valid_ether_addr(mac))
2842                         return -EADDRNOTAVAIL;
2843
2844                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2845                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2846         }
2847
2848         if (adapter->function_mode & 0x400)
2849                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2850         else
2851                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2852
2853         status = be_cmd_get_cntl_attributes(adapter);
2854         if (status)
2855                 return status;
2856
2857         be_cmd_check_native_mode(adapter);
2858         return 0;
2859 }
2860
2861 static int be_dev_family_check(struct be_adapter *adapter)
2862 {
2863         struct pci_dev *pdev = adapter->pdev;
2864         u32 sli_intf = 0, if_type;
2865
2866         switch (pdev->device) {
2867         case BE_DEVICE_ID1:
2868         case OC_DEVICE_ID1:
2869                 adapter->generation = BE_GEN2;
2870                 break;
2871         case BE_DEVICE_ID2:
2872         case OC_DEVICE_ID2:
2873                 adapter->generation = BE_GEN3;
2874                 break;
2875         case OC_DEVICE_ID3:
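                     /* read and validate the SLI interface register; derive the SLI family */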
2876                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2877                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2878                                                 SLI_INTF_IF_TYPE_SHIFT;
2879
2880                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2881                         if_type != 0x02) {
2882                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2883                         return -EINVAL;
2884                 }
2885                 if (num_vfs > 0) {
2886                         dev_err(&pdev->dev, "VFs not supported\n");
2887                         return -EINVAL;
2888                 }
2889                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2890                                          SLI_INTF_FAMILY_SHIFT);
2891                 adapter->generation = BE_GEN3;
2892                 break;
2893         default:
2894                 adapter->generation = 0;
2895         }
2896         return 0;
2897 }
2898
2899 static int lancer_wait_ready(struct be_adapter *adapter)
2900 {
2901 #define SLIPORT_READY_TIMEOUT 500
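     /* poll every 20ms: worst-case wait is ~10 seconds */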
2902         u32 sliport_status;
2903         int status = 0, i;
2904
2905         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2906                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2907                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2908                         break;
2909
2910                 msleep(20);
2911         }
2912
2913         if (i == SLIPORT_READY_TIMEOUT)
2914                 status = -1;
2915
2916         return status;
2917 }
2918
2919 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2920 {
2921         int status;
2922         u32 sliport_status, err, reset_needed;
2923         status = lancer_wait_ready(adapter);
2924         if (!status) {
2925                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2926                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2927                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2928                 if (err && reset_needed) {
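                             /* request a port reset, then wait for the adapter to recover */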
2929                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
2930                                         adapter->db + SLIPORT_CONTROL_OFFSET);
2931
2932                         /* check adapter has corrected the error */
2933                         status = lancer_wait_ready(adapter);
2934                         sliport_status = ioread32(adapter->db +
2935                                                         SLIPORT_STATUS_OFFSET);
2936                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2937                                                 SLIPORT_STATUS_RN_MASK);
2938                         if (status || sliport_status)
2939                                 status = -1;
2940                 } else if (err || reset_needed) {
2941                         status = -1;
2942                 }
2943         }
2944         return status;
2945 }
2946
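     /*
      * Probe sequence: enable the PCI device and claim its regions, allocate
      * the netdev, pick a DMA mask (64-bit preferred, 32-bit fallback),
      * enable SR-IOV if requested, map the control structures, sync with the
      * firmware (POST/fw_init/reset_function), then set up stats, MSI-X and
      * the rings (be_setup()) before finally registering the netdev.  The
      * error labels below unwind these steps in reverse order.
      */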
2947 static int __devinit be_probe(struct pci_dev *pdev,
2948                         const struct pci_device_id *pdev_id)
2949 {
2950         int status = 0;
2951         struct be_adapter *adapter;
2952         struct net_device *netdev;
2953
2954         status = pci_enable_device(pdev);
2955         if (status)
2956                 goto do_none;
2957
2958         status = pci_request_regions(pdev, DRV_NAME);
2959         if (status)
2960                 goto disable_dev;
2961         pci_set_master(pdev);
2962
2963         netdev = alloc_etherdev(sizeof(struct be_adapter));
2964         if (!netdev) {
2965                 status = -ENOMEM;
2966                 goto rel_reg;
2967         }
2968         adapter = netdev_priv(netdev);
2969         adapter->pdev = pdev;
2970         pci_set_drvdata(pdev, adapter);
2971
2972         status = be_dev_family_check(adapter);
2973         if (status)
2974                 goto free_netdev;
2975
2976         adapter->netdev = netdev;
2977         SET_NETDEV_DEV(netdev, &pdev->dev);
2978
2979         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2980         if (!status) {
2981                 netdev->features |= NETIF_F_HIGHDMA;
2982         } else {
2983                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2984                 if (status) {
2985                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2986                         goto free_netdev;
2987                 }
2988         }
2989
2990         be_sriov_enable(adapter);
2991
2992         status = be_ctrl_init(adapter);
2993         if (status)
2994                 goto free_netdev;
2995
2996         if (lancer_chip(adapter)) {
2997                 status = lancer_test_and_set_rdy_state(adapter);
2998                 if (status) {
2999                         dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3000                         goto free_netdev;
3001                 }
3002         }
3003
3004         /* sync up with fw's ready state */
3005         if (be_physfn(adapter)) {
3006                 status = be_cmd_POST(adapter);
3007                 if (status)
3008                         goto ctrl_clean;
3009         }
3010
3011         /* tell fw we're ready to fire cmds */
3012         status = be_cmd_fw_init(adapter);
3013         if (status)
3014                 goto ctrl_clean;
3015
3016         status = be_cmd_reset_function(adapter);
3017         if (status)
3018                 goto ctrl_clean;
3019
3020         status = be_stats_init(adapter);
3021         if (status)
3022                 goto ctrl_clean;
3023
3024         status = be_get_config(adapter);
3025         if (status)
3026                 goto stats_clean;
3027
3028         be_msix_enable(adapter);
3029
3030         INIT_DELAYED_WORK(&adapter->work, be_worker);
3031
3032         status = be_setup(adapter);
3033         if (status)
3034                 goto msix_disable;
3035
3036         be_netdev_init(netdev);
3037         status = register_netdev(netdev);
3038         if (status)
3039                 goto unsetup;
3040         netif_carrier_off(netdev);
3041
3042         if (be_physfn(adapter) && adapter->sriov_enabled) {
3043                 status = be_vf_eth_addr_config(adapter);
3044                 if (status)
3045                         goto unreg_netdev;
3046         }
3047
3048         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3049         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3050         return 0;
3051
3052 unreg_netdev:
3053         unregister_netdev(netdev);
3054 unsetup:
3055         be_clear(adapter);
3056 msix_disable:
3057         be_msix_disable(adapter);
3058 stats_clean:
3059         be_stats_cleanup(adapter);
3060 ctrl_clean:
3061         be_ctrl_cleanup(adapter);
3062 free_netdev:
3063         be_sriov_disable(adapter);
3064         free_netdev(netdev);
3065         pci_set_drvdata(pdev, NULL);
3066 rel_reg:
3067         pci_release_regions(pdev);
3068 disable_dev:
3069         pci_disable_device(pdev);
3070 do_none:
3071         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3072         return status;
3073 }
3074
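     /*
      * Legacy PCI power-management entry points.  Suspend arms wake-on-LAN
      * if configured, closes the interface under rtnl_lock, saves the
      * current flow-control settings and tears down all queues and
      * interrupts before putting the device into the requested D-state.
      */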
3075 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3076 {
3077         struct be_adapter *adapter = pci_get_drvdata(pdev);
3078         struct net_device *netdev = adapter->netdev;
3079
3080         cancel_delayed_work_sync(&adapter->work);
3081         if (adapter->wol)
3082                 be_setup_wol(adapter, true);
3083
3084         netif_device_detach(netdev);
3085         if (netif_running(netdev)) {
3086                 rtnl_lock();
3087                 be_close(netdev);
3088                 rtnl_unlock();
3089         }
3090         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3091         be_clear(adapter);
3092
3093         be_msix_disable(adapter);
3094         pci_save_state(pdev);
3095         pci_disable_device(pdev);
3096         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3097         return 0;
3098 }
3099
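     /*
      * Resume mirrors suspend: bring the device back to D0 and restore its
      * config space, then re-issue be_cmd_fw_init() *before* any other
      * mailbox commands, rebuild the queues with be_setup() and reopen the
      * interface.
      */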
3100 static int be_resume(struct pci_dev *pdev)
3101 {
3102         int status = 0;
3103         struct be_adapter *adapter = pci_get_drvdata(pdev);
3104         struct net_device *netdev = adapter->netdev;
3105
3106         netif_device_detach(netdev);
3107
3108         status = pci_enable_device(pdev);
3109         if (status)
3110                 return status;
3111
3112         pci_set_power_state(pdev, PCI_D0);
3113         pci_restore_state(pdev);
3114
3115         be_msix_enable(adapter);
3116         /* tell fw we're ready to fire cmds */
3117         status = be_cmd_fw_init(adapter);
3118         if (status)
3119                 return status;
3120
3121         status = be_setup(adapter);
             if (status)
                     return status;
3122         if (netif_running(netdev)) {
3123                 rtnl_lock();
3124                 be_open(netdev);
3125                 rtnl_unlock();
3126         }
3127         netif_device_attach(netdev);
3128
3129         if (adapter->wol)
3130                 be_setup_wol(adapter, false);
3131
3132         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3133         return 0;
3134 }
3135
3136 /*
3137  * An FLR (issued via be_cmd_reset_function() below) stops the controller from DMAing any further data.
3138  */
3139 static void be_shutdown(struct pci_dev *pdev)
3140 {
3141         struct be_adapter *adapter = pci_get_drvdata(pdev);
3142         struct net_device *netdev = adapter->netdev;
3143
3144         if (netif_running(netdev))
3145                 cancel_delayed_work_sync(&adapter->work);
3146
3147         netif_device_detach(netdev);
3148
3149         be_cmd_reset_function(adapter);
3150
3151         if (adapter->wol)
3152                 be_setup_wol(adapter, true);
3153
3154         pci_disable_device(pdev);
3155 }
3156
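     /*
      * PCI-EEH (error recovery) callbacks: error_detected tears everything
      * down and asks for a slot reset (or disconnects on a permanent
      * failure), slot_reset re-enables the device and re-runs POST to check
      * that the firmware is ready, and resume rebuilds the rings and brings
      * the interface back up.
      */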
3157 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3158                                 pci_channel_state_t state)
3159 {
3160         struct be_adapter *adapter = pci_get_drvdata(pdev);
3161         struct net_device *netdev = adapter->netdev;
3162
3163         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3164
3165         adapter->eeh_err = true;
3166
3167         netif_device_detach(netdev);
3168
3169         if (netif_running(netdev)) {
3170                 rtnl_lock();
3171                 be_close(netdev);
3172                 rtnl_unlock();
3173         }
3174         be_clear(adapter);
3175
3176         if (state == pci_channel_io_perm_failure)
3177                 return PCI_ERS_RESULT_DISCONNECT;
3178
3179         pci_disable_device(pdev);
3180
3181         return PCI_ERS_RESULT_NEED_RESET;
3182 }
3183
3184 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3185 {
3186         struct be_adapter *adapter = pci_get_drvdata(pdev);
3187         int status;
3188
3189         dev_info(&adapter->pdev->dev, "EEH reset\n");
3190         adapter->eeh_err = false;
3191
3192         status = pci_enable_device(pdev);
3193         if (status)
3194                 return PCI_ERS_RESULT_DISCONNECT;
3195
3196         pci_set_master(pdev);
3197         pci_set_power_state(pdev, PCI_D0);
3198         pci_restore_state(pdev);
3199
3200         /* Check if card is ok and fw is ready */
3201         status = be_cmd_POST(adapter);
3202         if (status)
3203                 return PCI_ERS_RESULT_DISCONNECT;
3204
3205         return PCI_ERS_RESULT_RECOVERED;
3206 }
3207
3208 static void be_eeh_resume(struct pci_dev *pdev)
3209 {
3210         int status = 0;
3211         struct be_adapter *adapter = pci_get_drvdata(pdev);
3212         struct net_device *netdev = adapter->netdev;
3213
3214         dev_info(&adapter->pdev->dev, "EEH resume\n");
3215
3216         pci_save_state(pdev);
3217
3218         /* tell fw we're ready to fire cmds */
3219         status = be_cmd_fw_init(adapter);
3220         if (status)
3221                 goto err;
3222
3223         status = be_setup(adapter);
3224         if (status)
3225                 goto err;
3226
3227         if (netif_running(netdev)) {
3228                 status = be_open(netdev);
3229                 if (status)
3230                         goto err;
3231         }
3232         netif_device_attach(netdev);
3233         return;
3234 err:
3235         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3236 }
3237
3238 static struct pci_error_handlers be_eeh_handlers = {
3239         .error_detected = be_eeh_err_detected,
3240         .slot_reset = be_eeh_reset,
3241         .resume = be_eeh_resume,
3242 };
3243
3244 static struct pci_driver be_driver = {
3245         .name = DRV_NAME,
3246         .id_table = be_dev_ids,
3247         .probe = be_probe,
3248         .remove = be_remove,
3249         .suspend = be_suspend,
3250         .resume = be_resume,
3251         .shutdown = be_shutdown,
3252         .err_handler = &be_eeh_handlers
3253 };
3254
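     /*
      * Validate module parameters before registering the driver; invalid
      * values fall back to safe defaults below.  Example invocation
      * (module name per DRV_NAME, here "be2net"):
      *
      *   modprobe be2net rx_frag_size=4096 num_vfs=4
      */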
3255 static int __init be_init_module(void)
3256 {
3257         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3258             rx_frag_size != 2048) {
3259                 printk(KERN_WARNING DRV_NAME
3260                         " : Module param rx_frag_size must be 2048/4096/8192."
3261                         " Using 2048\n");
3262                 rx_frag_size = 2048;
3263         }
3264
3265         if (num_vfs > 32) {
3266                 printk(KERN_WARNING DRV_NAME
3267                         " : Module param num_vfs must not be greater than 32."
3268                         "Using 32\n");
3269                 num_vfs = 32;
3270         }
3271
3272         return pci_register_driver(&be_driver);
3273 }
3274 module_init(be_init_module);
3275
3276 static void __exit be_exit_module(void)
3277 {
3278         pci_unregister_driver(&be_driver);
3279 }
3280 module_exit(be_exit_module);