/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

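/* Doorbell helpers: each queue type has its own doorbell register in the
 * adapter's doorbell BAR. The wmb() before each write ensures that queue
 * entries written by the CPU are visible in memory before the device is
 * told about them; without it the device could DMA stale descriptors.
 */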
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

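/* MAC address change handler (presumably wired up as .ndo_set_mac_address;
 * the netdev_ops table is outside this excerpt). For a VF the MAC is
 * programmed by its owning PF, so only the netdev copy is updated here; for
 * the PF the old pmac entry is deleted and the new address added on the
 * interface.
 */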
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
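/* A worked example of the heuristic below (illustrative, not from the
 * original source): at ~880K rx frags/sec, eqd = (880000 / 110000) << 3 = 64.
 * Effectively one delay unit per ~13750 frags/sec, clamped to
 * [min_eqd, max_eqd], and forced to 0 under light load (eqd < 10) so that
 * interrupt latency stays low on quiet queues.
 */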
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

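/* Populate the header WRB that leads every transmit request: checksum/LSO
 * offload flags, an optional VLAN tag to insert, the total WRB count and
 * frame length. The extra checksum bits set for GSO frames on Lancer A0 look
 * like a requirement specific to that SLI family (an inference; the hardware
 * rationale is not documented here).
 */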
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

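/* DMA-map the skb head and page frags and write one data WRB per mapping,
 * preceded by the header WRB reserved at the current queue head. A
 * zero-length dummy WRB may follow to keep the WRB count even on non-Lancer
 * chips. Returns the number of bytes mapped, or 0 after unwinding all
 * mappings if a DMA mapping fails.
 */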
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

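/* Transmit entry point (presumably .ndo_start_xmit; the netdev_ops wiring is
 * outside this excerpt). Note the queue is stopped *before* the doorbell is
 * rung when the ring is about to fill; the completion path will then wake it
 * once WRBs are reclaimed.
 */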
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

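/* Rx filter policy, in order of precedence: full promiscuous mode if
 * IFF_PROMISC is set; otherwise drop out of promiscuous mode if it was on;
 * then fall back to multicast-promiscuous (NULL list) if IFF_ALLMULTI is set
 * or more than BE_MAX_MC addresses are configured; else program the exact
 * multicast list.
 */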
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}

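/* Look up the page_info entry backing the rx fragment at frag_idx. Several
 * fragments can share one (possibly compound) page; the page is only
 * DMA-unmapped when its last user is consumed. Also gives back one rxq
 * credit.
 */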
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        /* Skip out-of-buffer compl (lancer) or flush compl (BE) */
        if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

                rxo->last_frag_index = rxq_idx;

                for (i = 0; i < num_rcvd; i++) {
                        page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                        put_page(page_info->page);
                        memset(page_info, 0, sizeof(*page_info));
                        index_inc(&rxq_idx, rxq->len);
                }
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

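/* Return the completion at the tail of the rx CQ, or NULL if none is ready.
 * The valid dword is polled first; the rmb() ensures the rest of the entry
 * is not read until after the valid bit has been observed set.
 */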
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

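/* GFP_ATOMIC, as this is called from the rx replenish path which can run in
 * softirq context. __GFP_COMP makes order > 0 allocations compound pages,
 * presumably so the per-fragment get_page()/put_page() references taken on
 * the shared page are counted against a single head page.
 */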
static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

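/* Reclaim all WRBs of the request whose completion points at last_index.
 * The header WRB at txq->tail is skipped (num_wrbs starts at 1 to count it),
 * the first data WRB unmaps the linear head (if any) and the rest unmap page
 * frags; finally txq->used is credited and the recorded skb is freed.
 */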
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

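/* Queues must be created strictly in EQ -> CQ -> queue order, since each
 * level is bound to the one before it; the error path below unwinds in the
 * reverse order. The tx EQ runs with a fixed eqd of 96 and adaptive
 * interrupt coalescing (aic) disabled.
 */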
1486 static int be_tx_queues_create(struct be_adapter *adapter)
1487 {
1488         struct be_queue_info *eq, *q, *cq;
1489
1490         adapter->tx_eq.max_eqd = 0;
1491         adapter->tx_eq.min_eqd = 0;
1492         adapter->tx_eq.cur_eqd = 96;
1493         adapter->tx_eq.enable_aic = false;
1494         /* Alloc Tx Event queue */
1495         eq = &adapter->tx_eq.q;
1496         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1497                 return -1;
1498
1499         /* Ask BE to create Tx Event queue */
1500         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1501                 goto tx_eq_free;
1502
1503         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1504
1506         /* Alloc TX eth compl queue */
1507         cq = &adapter->tx_obj.cq;
1508         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1509                         sizeof(struct be_eth_tx_compl)))
1510                 goto tx_eq_destroy;
1511
1512         /* Ask BE to create Tx eth compl queue */
1513         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1514                 goto tx_cq_free;
1515
1516         /* Alloc TX eth queue */
1517         q = &adapter->tx_obj.q;
1518         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1519                 goto tx_cq_destroy;
1520
1521         /* Ask BE to create Tx eth queue */
1522         if (be_cmd_txq_create(adapter, q, cq))
1523                 goto tx_q_free;
1524         return 0;
1525
1526 tx_q_free:
1527         be_queue_free(adapter, q);
1528 tx_cq_destroy:
1529         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1530 tx_cq_free:
1531         be_queue_free(adapter, cq);
1532 tx_eq_destroy:
1533         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1534 tx_eq_free:
1535         be_queue_free(adapter, eq);
1536         return -1;
1537 }
1538
1539 static void be_rx_queues_destroy(struct be_adapter *adapter)
1540 {
1541         struct be_queue_info *q;
1542         struct be_rx_obj *rxo;
1543         int i;
1544
1545         for_all_rx_queues(adapter, rxo, i) {
1546                 q = &rxo->q;
1547                 if (q->created) {
1548                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1549                         /* After the rxq is invalidated, wait for a grace time
1550                          * of 1ms for all dma to end and the flush compl to
1551                          * arrive
1552                          */
1553                         mdelay(1);
1554                         be_rx_q_clean(adapter, rxo);
1555                 }
1556                 be_queue_free(adapter, q);
1557
1558                 q = &rxo->cq;
1559                 if (q->created)
1560                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1561                 be_queue_free(adapter, q);
1562
1563                 /* Clear any residual events */
1564                 q = &rxo->rx_eq.q;
1565                 if (q->created) {
1566                         be_eq_clean(adapter, &rxo->rx_eq);
1567                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1568                 }
1569                 be_queue_free(adapter, q);
1570         }
1571 }
1572
1573 static int be_rx_queues_create(struct be_adapter *adapter)
1574 {
1575         struct be_queue_info *eq, *q, *cq;
1576         struct be_rx_obj *rxo;
1577         int rc, i;
1578
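             /* Size of the compound page from which rx_frag_size-sized
              * fragments are carved when posting RX buffers.
              */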
1579         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1580         for_all_rx_queues(adapter, rxo, i) {
1581                 rxo->adapter = adapter;
1582                 /* Init last_frag_index so that the frag index in the first
1583                  * completion will never match */
1584                 rxo->last_frag_index = 0xffff;
1585                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1586                 rxo->rx_eq.enable_aic = true;
1587
1588                 /* EQ */
1589                 eq = &rxo->rx_eq.q;
1590                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1591                                         sizeof(struct be_eq_entry));
1592                 if (rc)
1593                         goto err;
1594
1595                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1596                 if (rc)
1597                         goto err;
1598
1599                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1600
1601                 /* CQ */
1602                 cq = &rxo->cq;
1603                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1604                                 sizeof(struct be_eth_rx_compl));
1605                 if (rc)
1606                         goto err;
1607
1608                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1609                 if (rc)
1610                         goto err;
1611                 /* Rx Q */
1612                 q = &rxo->q;
1613                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1614                                 sizeof(struct be_eth_rx_d));
1615                 if (rc)
1616                         goto err;
1617
1618                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1619                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1620                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1621                 if (rc)
1622                         goto err;
1623         }
1624
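             /* Queue 0 is the default non-RSS queue; traffic for the rest
              * is spread via an RSS indirection table built from their
              * rss_ids.
              */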
1625         if (be_multi_rxq(adapter)) {
1626                 u8 rsstable[MAX_RSS_QS];
1627
1628                 for_all_rss_queues(adapter, rxo, i)
1629                         rsstable[i] = rxo->rss_id;
1630
1631                 rc = be_cmd_rss_config(adapter, rsstable,
1632                         adapter->num_rx_qs - 1);
1633                 if (rc)
1634                         goto err;
1635         }
1636
1637         return 0;
1638 err:
1639         be_rx_queues_destroy(adapter);
1640         return -1;
1641 }
1642
1643 static bool event_peek(struct be_eq_obj *eq_obj)
1644 {
1645         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1646         return eqe->evt != 0;
1650 }
1651
1652 static irqreturn_t be_intx(int irq, void *dev)
1653 {
1654         struct be_adapter *adapter = dev;
1655         struct be_rx_obj *rxo;
1656         int isr, i, tx = 0, rx = 0;
1657
1658         if (lancer_chip(adapter)) {
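                     /* Lancer maps no CSR BAR (see be_map_pci_bars), so the
                      * CEV_ISR read below is unavailable; peek at each EQ
                      * instead to see whether this interrupt is ours.
                      */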
1659                 if (event_peek(&adapter->tx_eq))
1660                         tx = event_handle(adapter, &adapter->tx_eq);
1661                 for_all_rx_queues(adapter, rxo, i) {
1662                         if (event_peek(&rxo->rx_eq))
1663                                 rx |= event_handle(adapter, &rxo->rx_eq);
1664                 }
1665
1666                 if (!(tx || rx))
1667                         return IRQ_NONE;
1668
1669         } else {
1670                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1671                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1672                 if (!isr)
1673                         return IRQ_NONE;
1674
1675                 if ((1 << adapter->tx_eq.msix_vec_idx) & isr)
1676                         event_handle(adapter, &adapter->tx_eq);
1677
1678                 for_all_rx_queues(adapter, rxo, i) {
1679                         if ((1 << rxo->rx_eq.msix_vec_idx) & isr)
1680                                 event_handle(adapter, &rxo->rx_eq);
1681                 }
1682         }
1683
1684         return IRQ_HANDLED;
1685 }
1686
1687 static irqreturn_t be_msix_rx(int irq, void *dev)
1688 {
1689         struct be_rx_obj *rxo = dev;
1690         struct be_adapter *adapter = rxo->adapter;
1691
1692         event_handle(adapter, &rxo->rx_eq);
1693
1694         return IRQ_HANDLED;
1695 }
1696
1697 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1698 {
1699         struct be_adapter *adapter = dev;
1700
1701         event_handle(adapter, &adapter->tx_eq);
1702
1703         return IRQ_HANDLED;
1704 }
1705
1706 static inline bool do_gro(struct be_rx_obj *rxo,
1707                         struct be_eth_rx_compl *rxcp, u8 err)
1708 {
1709         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1710
1711         if (err)
1712                 rxo->stats.rxcp_err++;
1713
1714         return tcp_frame && !err;
1715 }
1716
1717 static int be_poll_rx(struct napi_struct *napi, int budget)
1718 {
1719         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1720         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1721         struct be_adapter *adapter = rxo->adapter;
1722         struct be_queue_info *rx_cq = &rxo->cq;
1723         struct be_eth_rx_compl *rxcp;
1724         u32 work_done;
1725         u16 frag_index, num_rcvd;
1726         u8 err;
1727
1728         rxo->stats.rx_polls++;
1729         for (work_done = 0; work_done < budget; work_done++) {
1730                 rxcp = be_rx_compl_get(rxo);
1731                 if (!rxcp)
1732                         break;
1733
1734                 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1735                 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1736                                                                 rxcp);
1737                 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1738                                                                 rxcp);
1739
1740                 /* Skip out-of-buffer compl (Lancer) or flush compl (BE) */
1741                 if (likely(frag_index != rxo->last_frag_index &&
1742                                 num_rcvd != 0)) {
1743                         rxo->last_frag_index = frag_index;
1744
1745                         if (do_gro(rxo, rxcp, err))
1746                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1747                         else
1748                                 be_rx_compl_process(adapter, rxo, rxcp);
1749                 }
1750
1751                 be_rx_compl_reset(rxcp);
1752         }
1753
1754         /* Refill the queue */
1755         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1756                 be_post_rx_frags(rxo);
1757
1758         /* All consumed */
1759         if (work_done < budget) {
1760                 napi_complete(napi);
1761                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1762         } else {
1763                 /* More to be consumed; continue with interrupts disabled */
1764                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1765         }
1766         return work_done;
1767 }
1768
1769 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1770  * For TX/MCC we don't honour budget; consume everything
1771  */
1772 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1773 {
1774         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1775         struct be_adapter *adapter =
1776                 container_of(tx_eq, struct be_adapter, tx_eq);
1777         struct be_queue_info *txq = &adapter->tx_obj.q;
1778         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1779         struct be_eth_tx_compl *txcp;
1780         int tx_compl = 0, mcc_compl, status = 0;
1781         u16 end_idx;
1782
1783         while ((txcp = be_tx_compl_get(tx_cq))) {
1784                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1785                                 wrb_index, txcp);
1786                 be_tx_compl_process(adapter, end_idx);
1787                 tx_compl++;
1788         }
1789
1790         mcc_compl = be_process_mcc(adapter, &status);
1791
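             /* TX and MCC work is drained fully above regardless of budget,
              * so this napi context can always be completed here.
              */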
1792         napi_complete(napi);
1793
1794         if (mcc_compl) {
1795                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1796                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1797         }
1798
1799         if (tx_compl) {
1800                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1801
1802                 /* As Tx wrbs have been freed up, wake up netdev queue if
1803                  * it was stopped due to lack of tx wrbs.
1804                  */
1805                 if (netif_queue_stopped(adapter->netdev) &&
1806                         atomic_read(&txq->used) < txq->len / 2) {
1807                         netif_wake_queue(adapter->netdev);
1808                 }
1809
1810                 tx_stats(adapter)->be_tx_events++;
1811                 tx_stats(adapter)->be_tx_compl += tx_compl;
1812         }
1813
1814         return 1;
1815 }
1816
1817 void be_detect_dump_ue(struct be_adapter *adapter)
1818 {
1819         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1820         u32 i;
1821
1822         pci_read_config_dword(adapter->pdev,
1823                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1824         pci_read_config_dword(adapter->pdev,
1825                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1826         pci_read_config_dword(adapter->pdev,
1827                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1828         pci_read_config_dword(adapter->pdev,
1829                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1830
1831         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1832         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1833
1834         if (ue_status_lo || ue_status_hi) {
1835                 adapter->ue_detected = true;
1836                 adapter->eeh_err = true;
1837                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1838         }
1839
1840         if (ue_status_lo) {
1841                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1842                         if (ue_status_lo & 1)
1843                                 dev_err(&adapter->pdev->dev,
1844                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1845                 }
1846         }
1847         if (ue_status_hi) {
1848                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1849                         if (ue_status_hi & 1)
1850                                 dev_err(&adapter->pdev->dev,
1851                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1852                 }
1853         }
1855 }
1856
1857 static void be_worker(struct work_struct *work)
1858 {
1859         struct be_adapter *adapter =
1860                 container_of(work, struct be_adapter, work.work);
1861         struct be_rx_obj *rxo;
1862         int i;
1863
1864         /* When interrupts are not yet enabled, just reap any pending
1865          * mcc completions */
1866         if (!netif_running(adapter->netdev)) {
1867                 int mcc_compl, status = 0;
1868
1869                 mcc_compl = be_process_mcc(adapter, &status);
1870
1871                 if (mcc_compl) {
1872                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1873                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1874                 }
1875
1876                 if (!adapter->ue_detected && !lancer_chip(adapter))
1877                         be_detect_dump_ue(adapter);
1878
1879                 goto reschedule;
1880         }
1881
1882         if (!adapter->stats_cmd_sent)
1883                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1884
1885         be_tx_rate_update(adapter);
1886
1887         for_all_rx_queues(adapter, rxo, i) {
1888                 be_rx_rate_update(rxo);
1889                 be_rx_eqd_update(adapter, rxo);
1890
1891                 if (rxo->rx_post_starved) {
1892                         rxo->rx_post_starved = false;
1893                         be_post_rx_frags(rxo);
1894                 }
1895         }
1896         if (!adapter->ue_detected && !lancer_chip(adapter))
1897                 be_detect_dump_ue(adapter);
1898
1899 reschedule:
1900         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1901 }
1902
1903 static void be_msix_disable(struct be_adapter *adapter)
1904 {
1905         if (adapter->msix_enabled) {
1906                 pci_disable_msix(adapter->pdev);
1907                 adapter->msix_enabled = false;
1908         }
1909 }
1910
1911 static int be_num_rxqs_get(struct be_adapter *adapter)
1912 {
1913         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1914                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1915                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1916         } else {
1917                 dev_warn(&adapter->pdev->dev,
1918                         "No support for multiple RX queues\n");
1919                 return 1;
1920         }
1921 }
1922
1923 static void be_msix_enable(struct be_adapter *adapter)
1924 {
1925 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1926         int i, status;
1927
1928         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1929
1930         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1931                 adapter->msix_entries[i].entry = i;
1932
1933         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1934                         adapter->num_rx_qs + 1);
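             /* A positive return value from pci_enable_msix() is the number
              * of vectors actually available; retry with that count if it
              * still covers the minimum of one TX and one RX vector.
              */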
1935         if (status == 0) {
1936                 goto done;
1937         } else if (status >= BE_MIN_MSIX_VECTORS) {
1938                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1939                                 status) == 0) {
1940                         adapter->num_rx_qs = status - 1;
1941                         dev_warn(&adapter->pdev->dev,
1942                                 "Could alloc only %d MSIx vectors. "
1943                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1944                         goto done;
1945                 }
1946         }
1947         return;
1948 done:
1949         adapter->msix_enabled = true;
1950 }
1951
1952 static void be_sriov_enable(struct be_adapter *adapter)
1953 {
1954         be_check_sriov_fn_type(adapter);
1955 #ifdef CONFIG_PCI_IOV
1956         if (be_physfn(adapter) && num_vfs) {
1957                 int status;
1958
1959                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1960                 adapter->sriov_enabled = !status;
1961         }
1962 #endif
1963 }
1964
1965 static void be_sriov_disable(struct be_adapter *adapter)
1966 {
1967 #ifdef CONFIG_PCI_IOV
1968         if (adapter->sriov_enabled) {
1969                 pci_disable_sriov(adapter->pdev);
1970                 adapter->sriov_enabled = false;
1971         }
1972 #endif
1973 }
1974
1975 static inline int be_msix_vec_get(struct be_adapter *adapter,
1976                                         struct be_eq_obj *eq_obj)
1977 {
1978         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1979 }
1980
1981 static int be_request_irq(struct be_adapter *adapter,
1982                 struct be_eq_obj *eq_obj,
1983                 void *handler, char *desc, void *context)
1984 {
1985         struct net_device *netdev = adapter->netdev;
1986         int vec;
1987
1988         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1989         vec = be_msix_vec_get(adapter, eq_obj);
1990         return request_irq(vec, handler, 0, eq_obj->desc, context);
1991 }
1992
1993 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1994                         void *context)
1995 {
1996         int vec = be_msix_vec_get(adapter, eq_obj);
1997         free_irq(vec, context);
1998 }
1999
2000 static int be_msix_register(struct be_adapter *adapter)
2001 {
2002         struct be_rx_obj *rxo;
2003         int status, i;
2004         char qname[10];
2005
2006         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2007                                 adapter);
2008         if (status)
2009                 goto err;
2010
2011         for_all_rx_queues(adapter, rxo, i) {
2012                 sprintf(qname, "rxq%d", i);
2013                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2014                                 qname, rxo);
2015                 if (status)
2016                         goto err_msix;
2017         }
2018
2019         return 0;
2020
2021 err_msix:
2022         be_free_irq(adapter, &adapter->tx_eq, adapter);
2023
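             /* Unwind only the RX vectors registered so far; index i is
              * the request that failed.
              */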
2024         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2025                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2026
2027 err:
2028         dev_warn(&adapter->pdev->dev,
2029                 "MSIX Request IRQ failed - err %d\n", status);
2030         pci_disable_msix(adapter->pdev);
2031         adapter->msix_enabled = false;
2032         return status;
2033 }
2034
2035 static int be_irq_register(struct be_adapter *adapter)
2036 {
2037         struct net_device *netdev = adapter->netdev;
2038         int status;
2039
2040         if (adapter->msix_enabled) {
2041                 status = be_msix_register(adapter);
2042                 if (status == 0)
2043                         goto done;
2044                 /* INTx is not supported for VF */
2045                 if (!be_physfn(adapter))
2046                         return status;
2047         }
2048
2049         /* INTx */
2050         netdev->irq = adapter->pdev->irq;
2051         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2052                         adapter);
2053         if (status) {
2054                 dev_err(&adapter->pdev->dev,
2055                         "INTx request IRQ failed - err %d\n", status);
2056                 return status;
2057         }
2058 done:
2059         adapter->isr_registered = true;
2060         return 0;
2061 }
2062
2063 static void be_irq_unregister(struct be_adapter *adapter)
2064 {
2065         struct net_device *netdev = adapter->netdev;
2066         struct be_rx_obj *rxo;
2067         int i;
2068
2069         if (!adapter->isr_registered)
2070                 return;
2071
2072         /* INTx */
2073         if (!adapter->msix_enabled) {
2074                 free_irq(netdev->irq, adapter);
2075                 goto done;
2076         }
2077
2078         /* MSIx */
2079         be_free_irq(adapter, &adapter->tx_eq, adapter);
2080
2081         for_all_rx_queues(adapter, rxo, i)
2082                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2083
2084 done:
2085         adapter->isr_registered = false;
2086 }
2087
2088 static int be_close(struct net_device *netdev)
2089 {
2090         struct be_adapter *adapter = netdev_priv(netdev);
2091         struct be_rx_obj *rxo;
2092         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2093         int vec, i;
2094
2095         be_async_mcc_disable(adapter);
2096
2097         netif_stop_queue(netdev);
2098         netif_carrier_off(netdev);
2099         adapter->link_up = false;
2100
2101         if (!lancer_chip(adapter))
2102                 be_intr_set(adapter, false);
2103
2104         if (adapter->msix_enabled) {
2105                 vec = be_msix_vec_get(adapter, tx_eq);
2106                 synchronize_irq(vec);
2107
2108                 for_all_rx_queues(adapter, rxo, i) {
2109                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2110                         synchronize_irq(vec);
2111                 }
2112         } else {
2113                 synchronize_irq(netdev->irq);
2114         }
2115         be_irq_unregister(adapter);
2116
2117         for_all_rx_queues(adapter, rxo, i)
2118                 napi_disable(&rxo->rx_eq.napi);
2119
2120         napi_disable(&tx_eq->napi);
2121
2122         /* Wait for all pending tx completions to arrive so that
2123          * all tx skbs are freed.
2124          */
2125         be_tx_compl_clean(adapter);
2126
2127         return 0;
2128 }
2129
2130 static int be_open(struct net_device *netdev)
2131 {
2132         struct be_adapter *adapter = netdev_priv(netdev);
2133         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2134         struct be_rx_obj *rxo;
2135         bool link_up;
2136         int status, i;
2137         u8 mac_speed;
2138         u16 link_speed;
2139
2140         for_all_rx_queues(adapter, rxo, i) {
2141                 be_post_rx_frags(rxo);
2142                 napi_enable(&rxo->rx_eq.napi);
2143         }
2144         napi_enable(&tx_eq->napi);
2145
2146         be_irq_register(adapter);
2147
2148         if (!lancer_chip(adapter))
2149                 be_intr_set(adapter, true);
2150
2151         /* The evt queues are created in unarmed state; arm them */
2152         for_all_rx_queues(adapter, rxo, i) {
2153                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2154                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2155         }
2156         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2157
2158         /* Now that interrupts are on we can process async mcc */
2159         be_async_mcc_enable(adapter);
2160
2161         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2162                         &link_speed);
2163         if (status)
2164                 goto err;
2165         be_link_status_update(adapter, link_up);
2166
2167         if (be_physfn(adapter)) {
2168                 status = be_vid_config(adapter, false, 0);
2169                 if (status)
2170                         goto err;
2171
2172                 status = be_cmd_set_flow_control(adapter,
2173                                 adapter->tx_fc, adapter->rx_fc);
2174                 if (status)
2175                         goto err;
2176         }
2177
2178         return 0;
2179 err:
2180         be_close(adapter->netdev);
2181         return -EIO;
2182 }
2183
2184 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2185 {
2186         struct be_dma_mem cmd;
2187         int status = 0;
2188         u8 mac[ETH_ALEN];
2189
2190         memset(mac, 0, ETH_ALEN);
2191
2192         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2193         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2194                                     GFP_KERNEL);
2195         if (cmd.va == NULL)
2196                 return -ENOMEM;
2197         memset(cmd.va, 0, cmd.size);
2198
2199         if (enable) {
2200                 status = pci_write_config_dword(adapter->pdev,
2201                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2202                 if (status) {
2203                         dev_err(&adapter->pdev->dev,
2204                                 "Could not enable Wake-on-LAN\n");
2205                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2206                                           cmd.dma);
2207                         return status;
2208                 }
2209                 status = be_cmd_enable_magic_wol(adapter,
2210                                 adapter->netdev->dev_addr, &cmd);
2211                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2212                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2213         } else {
2214                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2215                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2216                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2217         }
2218
2219         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2220         return status;
2221 }
2222
2223 /*
2224  * Generate a seed MAC address from the PF MAC Address using jhash.
2225  * MAC addresses for VFs are assigned incrementally starting from the seed.
2226  * These addresses are programmed in the ASIC by the PF and the VF driver
2227  * queries for the MAC address during its probe.
2228  */
2229 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2230 {
2231         u32 vf = 0;
2232         int status = 0;
2233         u8 mac[ETH_ALEN];
2234
2235         be_vf_eth_addr_generate(adapter, mac);
2236
2237         for (vf = 0; vf < num_vfs; vf++) {
2238                 status = be_cmd_pmac_add(adapter, mac,
2239                                         adapter->vf_cfg[vf].vf_if_handle,
2240                                         &adapter->vf_cfg[vf].vf_pmac_id,
2241                                         vf + 1);
2242                 if (status)
2243                         dev_err(&adapter->pdev->dev,
2244                                 "Mac address add failed for VF %d\n", vf);
2245                 else
2246                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2247
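                     /* the next VF gets the next consecutive MAC address */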
2248                 mac[5] += 1;
2249         }
2250         return status;
2251 }
2252
2253 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2254 {
2255         u32 vf;
2256
2257         for (vf = 0; vf < num_vfs; vf++) {
2258                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2259                         be_cmd_pmac_del(adapter,
2260                                         adapter->vf_cfg[vf].vf_if_handle,
2261                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2262         }
2263 }
2264
2265 static int be_setup(struct be_adapter *adapter)
2266 {
2267         struct net_device *netdev = adapter->netdev;
2268         u32 cap_flags, en_flags, vf = 0;
2269         int status;
2270         u8 mac[ETH_ALEN];
2271
2272         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2273
2274         if (be_physfn(adapter)) {
2275                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2276                                 BE_IF_FLAGS_PROMISCUOUS |
2277                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2278                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2279
2280                 if (be_multi_rxq(adapter)) {
2281                         cap_flags |= BE_IF_FLAGS_RSS;
2282                         en_flags |= BE_IF_FLAGS_RSS;
2283                 }
2284         }
2285
2286         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2287                         netdev->dev_addr, false/* pmac_invalid */,
2288                         &adapter->if_handle, &adapter->pmac_id, 0);
2289         if (status != 0)
2290                 goto do_none;
2291
2292         if (be_physfn(adapter)) {
2293                 if (adapter->sriov_enabled) {
2294                         while (vf < num_vfs) {
2295                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2296                                                         BE_IF_FLAGS_BROADCAST;
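                                     /* mac[] is not programmed here since
                                      * pmac_invalid is passed as true; each
                                      * VF queries its MAC address at probe
                                      * time (see the comment above
                                      * be_vf_eth_addr_config).
                                      */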
2297                                 status = be_cmd_if_create(adapter, cap_flags,
2298                                         en_flags, mac, true,
2299                                         &adapter->vf_cfg[vf].vf_if_handle,
2300                                         NULL, vf+1);
2301                                 if (status) {
2302                                         dev_err(&adapter->pdev->dev,
2303                                         "Interface Create failed for VF %d\n",
2304                                         vf);
2305                                         goto if_destroy;
2306                                 }
2307                                 adapter->vf_cfg[vf].vf_pmac_id =
2308                                                         BE_INVALID_PMAC_ID;
2309                                 vf++;
2310                         }
2311                 }
2312         } else {
2313                 status = be_cmd_mac_addr_query(adapter, mac,
2314                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2315                 if (!status) {
2316                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2317                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2318                 }
2319         }
2320
2321         status = be_tx_queues_create(adapter);
2322         if (status != 0)
2323                 goto if_destroy;
2324
2325         status = be_rx_queues_create(adapter);
2326         if (status != 0)
2327                 goto tx_qs_destroy;
2328
2329         status = be_mcc_queues_create(adapter);
2330         if (status != 0)
2331                 goto rx_qs_destroy;
2332
2333         adapter->link_speed = -1;
2334
2335         return 0;
2336
2338 rx_qs_destroy:
2339         be_rx_queues_destroy(adapter);
2340 tx_qs_destroy:
2341         be_tx_queues_destroy(adapter);
2342 if_destroy:
2343         if (be_physfn(adapter) && adapter->sriov_enabled)
2344                 for (vf = 0; vf < num_vfs; vf++)
2345                         if (adapter->vf_cfg[vf].vf_if_handle)
2346                                 be_cmd_if_destroy(adapter,
2347                                         adapter->vf_cfg[vf].vf_if_handle,
2348                                         vf + 1);
2349         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2350 do_none:
2351         return status;
2352 }
2353
2354 static int be_clear(struct be_adapter *adapter)
2355 {
2356         int vf;
2357
2358         if (be_physfn(adapter) && adapter->sriov_enabled)
2359                 be_vf_eth_addr_rem(adapter);
2360
2361         be_mcc_queues_destroy(adapter);
2362         be_rx_queues_destroy(adapter);
2363         be_tx_queues_destroy(adapter);
2364
2365         if (be_physfn(adapter) && adapter->sriov_enabled)
2366                 for (vf = 0; vf < num_vfs; vf++)
2367                         if (adapter->vf_cfg[vf].vf_if_handle)
2368                                 be_cmd_if_destroy(adapter,
2369                                         adapter->vf_cfg[vf].vf_if_handle,
2370                                         vf + 1);
2371
2372         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2373
2374         /* tell fw we're done with firing cmds */
2375         be_cmd_fw_clean(adapter);
2376         return 0;
2377 }
2378
2380 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2381 static bool be_flash_redboot(struct be_adapter *adapter,
2382                         const u8 *p, u32 img_start, int image_size,
2383                         int hdr_size)
2384 {
2385         u32 crc_offset;
2386         u8 flashed_crc[4];
2387         int status;
2388
2389         crc_offset = hdr_size + img_start + image_size - 4;
2390
2391         p += crc_offset;
2392
2393         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2394                         (image_size - 4));
2395         if (status) {
2396                 dev_err(&adapter->pdev->dev,
2397                 "could not get crc from flash, not flashing redboot\n");
2398                 return false;
2399         }
2400
2401         /* update redboot only if the crc does not match */
2402         return memcmp(flashed_crc, p, 4) != 0;
2406 }
2407
2408 static int be_flash_data(struct be_adapter *adapter,
2409                         const struct firmware *fw,
2410                         struct be_dma_mem *flash_cmd, int num_of_images)
2412 {
2413         int status = 0, i, filehdr_size = 0;
2414         u32 total_bytes = 0, flash_op;
2415         int num_bytes;
2416         const u8 *p = fw->data;
2417         struct be_cmd_write_flashrom *req = flash_cmd->va;
2418         const struct flash_comp *pflashcomp;
2419         int num_comp;
2420
2421         static const struct flash_comp gen3_flash_types[9] = {
2422                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2423                         FLASH_IMAGE_MAX_SIZE_g3},
2424                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2425                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2426                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2427                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2428                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2429                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2430                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2431                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2432                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2433                         FLASH_IMAGE_MAX_SIZE_g3},
2434                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2435                         FLASH_IMAGE_MAX_SIZE_g3},
2436                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2437                         FLASH_IMAGE_MAX_SIZE_g3},
2438                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2439                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2440         };
2441         static const struct flash_comp gen2_flash_types[8] = {
2442                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2443                         FLASH_IMAGE_MAX_SIZE_g2},
2444                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2445                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2446                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2447                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2448                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2449                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2450                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2451                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2452                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2453                         FLASH_IMAGE_MAX_SIZE_g2},
2454                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2455                         FLASH_IMAGE_MAX_SIZE_g2},
2456                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2457                          FLASH_IMAGE_MAX_SIZE_g2}
2458         };
2459
2460         if (adapter->generation == BE_GEN3) {
2461                 pflashcomp = gen3_flash_types;
2462                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2463                 num_comp = ARRAY_SIZE(gen3_flash_types);
2464         } else {
2465                 pflashcomp = gen2_flash_types;
2466                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2467                 num_comp = ARRAY_SIZE(gen2_flash_types);
2468         }
2469         for (i = 0; i < num_comp; i++) {
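                     /* Skip the NCSI image on firmware older than
                      * 3.102.148.0, and skip redboot unless its CRC differs
                      * from what is already in flash.
                      */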
2470                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2471                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2472                         continue;
2473                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2474                         (!be_flash_redboot(adapter, fw->data,
2475                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2476                         (num_of_images * sizeof(struct image_hdr)))))
2477                         continue;
2478                 p = fw->data;
2479                 p += filehdr_size + pflashcomp[i].offset
2480                         + (num_of_images * sizeof(struct image_hdr));
2481                 if (p + pflashcomp[i].size > fw->data + fw->size)
2482                         return -1;
2483                 total_bytes = pflashcomp[i].size;
2484                 while (total_bytes) {
2485                         if (total_bytes > 32*1024)
2486                                 num_bytes = 32*1024;
2487                         else
2488                                 num_bytes = total_bytes;
2489                         total_bytes -= num_bytes;
2490
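                             /* Intermediate 32KB chunks are only buffered
                              * (OPER_SAVE); the final chunk triggers the
                              * actual flash write (OPER_FLASH).
                              */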
2491                         if (!total_bytes)
2492                                 flash_op = FLASHROM_OPER_FLASH;
2493                         else
2494                                 flash_op = FLASHROM_OPER_SAVE;
2495                         memcpy(req->params.data_buf, p, num_bytes);
2496                         p += num_bytes;
2497                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2498                                 pflashcomp[i].optype, flash_op, num_bytes);
2499                         if (status) {
2500                                 dev_err(&adapter->pdev->dev,
2501                                         "cmd to write to flash rom failed.\n");
2502                                 return -1;
2503                         }
2504                         yield();
2505                 }
2506         }
2507         return 0;
2508 }
2509
2510 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2511 {
2512         if (fhdr == NULL)
2513                 return 0;
2514         if (fhdr->build[0] == '3')
2515                 return BE_GEN3;
2516         else if (fhdr->build[0] == '2')
2517                 return BE_GEN2;
2518         else
2519                 return 0;
2520 }
2521
2522 int be_load_fw(struct be_adapter *adapter, u8 *func)
2523 {
2524         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2525         const struct firmware *fw;
2526         struct flash_file_hdr_g2 *fhdr;
2527         struct flash_file_hdr_g3 *fhdr3;
2528         struct image_hdr *img_hdr_ptr = NULL;
2529         struct be_dma_mem flash_cmd;
2530         int status, i = 0, num_imgs = 0;
2531         const u8 *p;
2532
2533         if (!netif_running(adapter->netdev)) {
2534                 dev_err(&adapter->pdev->dev,
2535                         "Firmware load not allowed (interface is down)\n");
2536                 return -EPERM;
2537         }
2538
2539         strcpy(fw_file, func);
2540
2541         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2542         if (status)
2543                 goto fw_exit;
2544
2545         p = fw->data;
2546         fhdr = (struct flash_file_hdr_g2 *) p;
2547         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2548
2549         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2550         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2551                                           &flash_cmd.dma, GFP_KERNEL);
2552         if (!flash_cmd.va) {
2553                 status = -ENOMEM;
2554                 dev_err(&adapter->pdev->dev,
2555                         "Memory allocation failure while flashing\n");
2556                 goto fw_exit;
2557         }
2558
2559         if ((adapter->generation == BE_GEN3) &&
2560                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2561                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2562                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2563                 for (i = 0; i < num_imgs; i++) {
2564                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2565                                         (sizeof(struct flash_file_hdr_g3) +
2566                                          i * sizeof(struct image_hdr)));
2567                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2568                                 status = be_flash_data(adapter, fw, &flash_cmd,
2569                                                         num_imgs);
2570                 }
2571         } else if ((adapter->generation == BE_GEN2) &&
2572                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2573                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2574         } else {
2575                 dev_err(&adapter->pdev->dev,
2576                         "UFI and Interface are not compatible for flashing\n");
2577                 status = -1;
2578         }
2579
2580         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2581                           flash_cmd.dma);
2582         if (status) {
2583                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2584                 goto fw_exit;
2585         }
2586
2587         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2588
2589 fw_exit:
2590         release_firmware(fw);
2591         return status;
2592 }
2593
2594 static const struct net_device_ops be_netdev_ops = {
2595         .ndo_open               = be_open,
2596         .ndo_stop               = be_close,
2597         .ndo_start_xmit         = be_xmit,
2598         .ndo_set_rx_mode        = be_set_multicast_list,
2599         .ndo_set_mac_address    = be_mac_addr_set,
2600         .ndo_change_mtu         = be_change_mtu,
2601         .ndo_validate_addr      = eth_validate_addr,
2602         .ndo_vlan_rx_register   = be_vlan_register,
2603         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2604         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2605         .ndo_set_vf_mac         = be_set_vf_mac,
2606         .ndo_set_vf_vlan        = be_set_vf_vlan,
2607         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2608         .ndo_get_vf_config      = be_get_vf_config
2609 };
2610
2611 static void be_netdev_init(struct net_device *netdev)
2612 {
2613         struct be_adapter *adapter = netdev_priv(netdev);
2614         struct be_rx_obj *rxo;
2615         int i;
2616
2617         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2618                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2619                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2620                 NETIF_F_GRO | NETIF_F_TSO6;
2621
2622         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2623                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2624
2625         if (lancer_chip(adapter))
2626                 netdev->vlan_features |= NETIF_F_TSO6;
2627
2628         netdev->flags |= IFF_MULTICAST;
2629
2630         adapter->rx_csum = true;
2631
2632         /* Default settings for Rx and Tx flow control */
2633         adapter->rx_fc = true;
2634         adapter->tx_fc = true;
2635
2636         netif_set_gso_max_size(netdev, 65535);
2637
2638         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2639
2640         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2641
2642         for_all_rx_queues(adapter, rxo, i)
2643                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2644                                 BE_NAPI_WEIGHT);
2645
2646         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2647                 BE_NAPI_WEIGHT);
2648 }
2649
2650 static void be_unmap_pci_bars(struct be_adapter *adapter)
2651 {
2652         if (adapter->csr)
2653                 iounmap(adapter->csr);
2654         if (adapter->db)
2655                 iounmap(adapter->db);
2656         if (adapter->pcicfg && be_physfn(adapter))
2657                 iounmap(adapter->pcicfg);
2658 }
2659
2660 static int be_map_pci_bars(struct be_adapter *adapter)
2661 {
2662         u8 __iomem *addr;
2663         int pcicfg_reg, db_reg;
2664
2665         if (lancer_chip(adapter)) {
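                     /* Lancer uses only the doorbell region in BAR 0; no
                      * separate CSR or pcicfg mapping is set up.
                      */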
2666                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2667                         pci_resource_len(adapter->pdev, 0));
2668                 if (addr == NULL)
2669                         return -ENOMEM;
2670                 adapter->db = addr;
2671                 return 0;
2672         }
2673
2674         if (be_physfn(adapter)) {
2675                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2676                                 pci_resource_len(adapter->pdev, 2));
2677                 if (addr == NULL)
2678                         return -ENOMEM;
2679                 adapter->csr = addr;
2680         }
2681
2682         if (adapter->generation == BE_GEN2) {
2683                 pcicfg_reg = 1;
2684                 db_reg = 4;
2685         } else {
2686                 pcicfg_reg = 0;
2687                 if (be_physfn(adapter))
2688                         db_reg = 4;
2689                 else
2690                         db_reg = 0;
2691         }
2692         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2693                                 pci_resource_len(adapter->pdev, db_reg));
2694         if (addr == NULL)
2695                 goto pci_map_err;
2696         adapter->db = addr;
2697
2698         if (be_physfn(adapter)) {
2699                 addr = ioremap_nocache(
2700                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2701                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2702                 if (addr == NULL)
2703                         goto pci_map_err;
2704                 adapter->pcicfg = addr;
2705         } else
2706                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2707
2708         return 0;
2709 pci_map_err:
2710         be_unmap_pci_bars(adapter);
2711         return -ENOMEM;
2712 }
2713
2715 static void be_ctrl_cleanup(struct be_adapter *adapter)
2716 {
2717         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2718
2719         be_unmap_pci_bars(adapter);
2720
2721         if (mem->va)
2722                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2723                                   mem->dma);
2724
2725         mem = &adapter->mc_cmd_mem;
2726         if (mem->va)
2727                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2728                                   mem->dma);
2729 }
2730
2731 static int be_ctrl_init(struct be_adapter *adapter)
2732 {
2733         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2734         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2735         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2736         int status;
2737
2738         status = be_map_pci_bars(adapter);
2739         if (status)
2740                 goto done;
2741
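             /* The mailbox must be 16-byte aligned: over-allocate by 16
              * bytes and align both the virtual and DMA addresses below.
              */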
2742         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2743         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2744                                                 mbox_mem_alloc->size,
2745                                                 &mbox_mem_alloc->dma,
2746                                                 GFP_KERNEL);
2747         if (!mbox_mem_alloc->va) {
2748                 status = -ENOMEM;
2749                 goto unmap_pci_bars;
2750         }
2751
2752         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2753         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2754         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2755         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2756
2757         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2758         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2759                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2760                                             GFP_KERNEL);
2761         if (mc_cmd_mem->va == NULL) {
2762                 status = -ENOMEM;
2763                 goto free_mbox;
2764         }
2765         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2766
2767         mutex_init(&adapter->mbox_lock);
2768         spin_lock_init(&adapter->mcc_lock);
2769         spin_lock_init(&adapter->mcc_cq_lock);
2770
2771         init_completion(&adapter->flash_compl);
2772         pci_save_state(adapter->pdev);
2773         return 0;
2774
2775 free_mbox:
2776         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2777                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2778
2779 unmap_pci_bars:
2780         be_unmap_pci_bars(adapter);
2781
2782 done:
2783         return status;
2784 }
2785
2786 static void be_stats_cleanup(struct be_adapter *adapter)
2787 {
2788         struct be_dma_mem *cmd = &adapter->stats_cmd;
2789
2790         if (cmd->va)
2791                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2792                                   cmd->va, cmd->dma);
2793 }
2794
2795 static int be_stats_init(struct be_adapter *adapter)
2796 {
2797         struct be_dma_mem *cmd = &adapter->stats_cmd;
2798
2799         cmd->size = sizeof(struct be_cmd_req_get_stats);
2800         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2801                                      GFP_KERNEL);
2802         if (cmd->va == NULL)
2803                 return -ENOMEM;
2804         memset(cmd->va, 0, cmd->size);
2805         return 0;
2806 }
2807
2808 static void __devexit be_remove(struct pci_dev *pdev)
2809 {
2810         struct be_adapter *adapter = pci_get_drvdata(pdev);
2811
2812         if (!adapter)
2813                 return;
2814
2815         cancel_delayed_work_sync(&adapter->work);
2816
2817         unregister_netdev(adapter->netdev);
2818
2819         be_clear(adapter);
2820
2821         be_stats_cleanup(adapter);
2822
2823         be_ctrl_cleanup(adapter);
2824
2825         be_sriov_disable(adapter);
2826
2827         be_msix_disable(adapter);
2828
2829         pci_set_drvdata(pdev, NULL);
2830         pci_release_regions(pdev);
2831         pci_disable_device(pdev);
2832
2833         free_netdev(adapter->netdev);
2834 }
2835
2836 static int be_get_config(struct be_adapter *adapter)
2837 {
2838         int status;
2839         u8 mac[ETH_ALEN];
2840
2841         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2842         if (status)
2843                 return status;
2844
2845         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2846                         &adapter->function_mode, &adapter->function_caps);
2847         if (status)
2848                 return status;
2849
2850         memset(mac, 0, ETH_ALEN);
2851
2852         if (be_physfn(adapter)) {
2853                 status = be_cmd_mac_addr_query(adapter, mac,
2854                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2855
2856                 if (status)
2857                         return status;
2858
2859                 if (!is_valid_ether_addr(mac))
2860                         return -EADDRNOTAVAIL;
2861
2862                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2863                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2864         }
2865
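             /* Bit 0x400 of function_mode is assumed to indicate
              * multi-channel (FLEX10-type) operation, where the VLAN table
              * is shared and this function gets a quarter of the entries.
              */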
2866         if (adapter->function_mode & 0x400)
2867                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2868         else
2869                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2870
2871         status = be_cmd_get_cntl_attributes(adapter);
2872         if (status)
2873                 return status;
2874
2875         return 0;
2876 }
2877
2878 static int be_dev_family_check(struct be_adapter *adapter)
2879 {
2880         struct pci_dev *pdev = adapter->pdev;
2881         u32 sli_intf = 0, if_type;
2882
2883         switch (pdev->device) {
2884         case BE_DEVICE_ID1:
2885         case OC_DEVICE_ID1:
2886                 adapter->generation = BE_GEN2;
2887                 break;
2888         case BE_DEVICE_ID2:
2889         case OC_DEVICE_ID2:
2890                 adapter->generation = BE_GEN3;
2891                 break;
2892         case OC_DEVICE_ID3:
2893                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2894                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2895                                                 SLI_INTF_IF_TYPE_SHIFT;
2896
2897                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2898                         if_type != 0x02) {
2899                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2900                         return -EINVAL;
2901                 }
2902                 if (num_vfs > 0) {
2903                         dev_err(&pdev->dev, "VFs not supported\n");
2904                         return -EINVAL;
2905                 }
2906                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2907                                          SLI_INTF_FAMILY_SHIFT);
2908                 adapter->generation = BE_GEN3;
2909                 break;
2910         default:
2911                 adapter->generation = 0;
2912         }
2913         return 0;
2914 }
2915
2916 static int __devinit be_probe(struct pci_dev *pdev,
2917                         const struct pci_device_id *pdev_id)
2918 {
2919         int status = 0;
2920         struct be_adapter *adapter;
2921         struct net_device *netdev;
2922
2923         status = pci_enable_device(pdev);
2924         if (status)
2925                 goto do_none;
2926
2927         status = pci_request_regions(pdev, DRV_NAME);
2928         if (status)
2929                 goto disable_dev;
2930         pci_set_master(pdev);
2931
2932         netdev = alloc_etherdev(sizeof(struct be_adapter));
2933         if (netdev == NULL) {
2934                 status = -ENOMEM;
2935                 goto rel_reg;
2936         }
2937         adapter = netdev_priv(netdev);
2938         adapter->pdev = pdev;
2939         pci_set_drvdata(pdev, adapter);
2940
2941         status = be_dev_family_check(adapter);
2942         if (status)
2943                 goto free_netdev;
2944
2945         adapter->netdev = netdev;
2946         SET_NETDEV_DEV(netdev, &pdev->dev);
2947
2948         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2949         if (!status) {
2950                 netdev->features |= NETIF_F_HIGHDMA;
2951         } else {
2952                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2953                 if (status) {
2954                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2955                         goto free_netdev;
2956                 }
2957         }
2958
2959         be_sriov_enable(adapter);
2960
2961         status = be_ctrl_init(adapter);
2962         if (status)
2963                 goto free_netdev;
2964
2965         /* sync up with fw's ready state */
2966         if (be_physfn(adapter)) {
2967                 status = be_cmd_POST(adapter);
2968                 if (status)
2969                         goto ctrl_clean;
2970         }
2971
2972         /* tell fw we're ready to fire cmds */
2973         status = be_cmd_fw_init(adapter);
2974         if (status)
2975                 goto ctrl_clean;
2976
2977         status = be_cmd_reset_function(adapter);
2978         if (status)
2979                 goto ctrl_clean;
2980
2981         status = be_stats_init(adapter);
2982         if (status)
2983                 goto ctrl_clean;
2984
2985         status = be_get_config(adapter);
2986         if (status)
2987                 goto stats_clean;
2988
2989         be_msix_enable(adapter);
2990
2991         INIT_DELAYED_WORK(&adapter->work, be_worker);
2992
2993         status = be_setup(adapter);
2994         if (status)
2995                 goto msix_disable;
2996
2997         be_netdev_init(netdev);
2998         status = register_netdev(netdev);
2999         if (status)
3000                 goto unsetup;
3001         netif_carrier_off(netdev);
3002
3003         if (be_physfn(adapter) && adapter->sriov_enabled) {
3004                 status = be_vf_eth_addr_config(adapter);
3005                 if (status)
3006                         goto unreg_netdev;
3007         }
3008
3009         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->hba_port_num);
3010         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3011         return 0;
3012
3013 unreg_netdev:
3014         unregister_netdev(netdev);
3015 unsetup:
3016         be_clear(adapter);
3017 msix_disable:
3018         be_msix_disable(adapter);
3019 stats_clean:
3020         be_stats_cleanup(adapter);
3021 ctrl_clean:
3022         be_ctrl_cleanup(adapter);
3023 free_netdev:
3024         be_sriov_disable(adapter);
3025         free_netdev(netdev);
3026         pci_set_drvdata(pdev, NULL);
3027 rel_reg:
3028         pci_release_regions(pdev);
3029 disable_dev:
3030         pci_disable_device(pdev);
3031 do_none:
3032         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3033         return status;
3034 }
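/*
 * A minimal sketch of the unwind idiom used in be_probe() above: every
 * failure jumps to the label that tears down exactly what has been set
 * up so far, in reverse order, so each init step has a single matching
 * cleanup. Names here are illustrative, not part of the driver.
 */
static int example_probe(struct pci_dev *pdev)
{
        int status;

        status = pci_enable_device(pdev);
        if (status)
                return status;

        status = pci_request_regions(pdev, "example");
        if (status)
                goto disable_dev;

        /* ...further init steps, each adding its own unwind label... */
        return 0;

disable_dev:
        pci_disable_device(pdev);
        return status;
}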
3035
3036 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3037 {
3038         struct be_adapter *adapter = pci_get_drvdata(pdev);
3039         struct net_device *netdev = adapter->netdev;
3040
3041         cancel_delayed_work_sync(&adapter->work);
3042         if (adapter->wol)
3043                 be_setup_wol(adapter, true);
3044
3045         netif_device_detach(netdev);
3046         if (netif_running(netdev)) {
3047                 rtnl_lock();
3048                 be_close(netdev);
3049                 rtnl_unlock();
3050         }
3051         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3052         be_clear(adapter);
3053
3054         be_msix_disable(adapter);
3055         pci_save_state(pdev);
3056         pci_disable_device(pdev);
3057         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3058         return 0;
3059 }
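/*
 * Sketch of the generic legacy-PM suspend tail, for comparison with
 * be_suspend() above: save config space, arm wake, enter the target
 * power state. This driver arms wake-on-LAN through its own firmware
 * command path (be_setup_wol()) instead of pci_enable_wake().
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}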
3060
3061 static int be_resume(struct pci_dev *pdev)
3062 {
3063         int status = 0;
3064         struct be_adapter *adapter = pci_get_drvdata(pdev);
3065         struct net_device *netdev = adapter->netdev;
3066
3067         netif_device_detach(netdev);
3068
3069         status = pci_enable_device(pdev);
3070         if (status)
3071                 return status;
3072
3073         pci_set_power_state(pdev, PCI_D0);
3074         pci_restore_state(pdev);
3075
3076         be_msix_enable(adapter);
3077         /* tell fw we're ready to fire cmds */
3078         status = be_cmd_fw_init(adapter);
3079         if (status)
3080                 return status;
3081
3082         be_setup(adapter);
3083         if (netif_running(netdev)) {
3084                 rtnl_lock();
3085                 be_open(netdev);
3086                 rtnl_unlock();
3087         }
3088         netif_device_attach(netdev);
3089
3090         if (adapter->wol)
3091                 be_setup_wol(adapter, false);
3092
3093         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3094         return 0;
3095 }
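/*
 * Sketch: a legacy .resume mirrors .suspend in reverse - power up,
 * restore config space, disarm wake, then re-initialize. Illustrative
 * only; be_resume() above additionally re-runs fw init and be_setup().
 */
static int example_resume(struct pci_dev *pdev)
{
        int status;

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, false);
        return 0;
}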
3096
3097 /*
3098  * An FLR (triggered below via be_cmd_reset_function()) stops BE from DMAing any data.
3099  */
3100 static void be_shutdown(struct pci_dev *pdev)
3101 {
3102         struct be_adapter *adapter = pci_get_drvdata(pdev);
3103         struct net_device *netdev = adapter->netdev;
3104
3105         if (netif_running(netdev))
3106                 cancel_delayed_work_sync(&adapter->work);
3107
3108         netif_device_detach(netdev);
3109
3110         be_cmd_reset_function(adapter);
3111
3112         if (adapter->wol)
3113                 be_setup_wol(adapter, true);
3114
3115         pci_disable_device(pdev);
3116 }
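/*
 * Sketch of the .shutdown contract: it runs at reboot/kexec time and
 * only needs to quiesce DMA and interrupts - no .remove-style resource
 * teardown. The drvdata layout below is hypothetical (this driver
 * stores the adapter, not the netdev, as drvdata).
 */
static void example_shutdown(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        netif_device_detach(netdev);
        pci_disable_device(pdev);
}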
3117
3118 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3119                                 pci_channel_state_t state)
3120 {
3121         struct be_adapter *adapter = pci_get_drvdata(pdev);
3122         struct net_device *netdev = adapter->netdev;
3123
3124         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3125
3126         adapter->eeh_err = true;
3127
3128         netif_device_detach(netdev);
3129
3130         if (netif_running(netdev)) {
3131                 rtnl_lock();
3132                 be_close(netdev);
3133                 rtnl_unlock();
3134         }
3135         be_clear(adapter);
3136
3137         if (state == pci_channel_io_perm_failure)
3138                 return PCI_ERS_RESULT_DISCONNECT;
3139
3140         pci_disable_device(pdev);
3141
3142         return PCI_ERS_RESULT_NEED_RESET;
3143 }
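/*
 * Sketch of the EEH/AER callback contract implemented above:
 * error_detected() reports whether recovery should be attempted.
 * NEED_RESET asks the PCI core to reset the slot and then call
 * slot_reset(); DISCONNECT gives up on the device entirely.
 */
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_NEED_RESET;
}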
3144
3145 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3146 {
3147         struct be_adapter *adapter = pci_get_drvdata(pdev);
3148         int status;
3149
3150         dev_info(&adapter->pdev->dev, "EEH reset\n");
3151         adapter->eeh_err = false;
3152
3153         status = pci_enable_device(pdev);
3154         if (status)
3155                 return PCI_ERS_RESULT_DISCONNECT;
3156
3157         pci_set_master(pdev);
3158         pci_set_power_state(pdev, PCI_D0);
3159         pci_restore_state(pdev);
3160
3161         /* Check if card is ok and fw is ready */
3162         status = be_cmd_POST(adapter);
3163         if (status)
3164                 return PCI_ERS_RESULT_DISCONNECT;
3165
3166         return PCI_ERS_RESULT_RECOVERED;
3167 }
3168
3169 static void be_eeh_resume(struct pci_dev *pdev)
3170 {
3171         int status = 0;
3172         struct be_adapter *adapter = pci_get_drvdata(pdev);
3173         struct net_device *netdev = adapter->netdev;
3174
3175         dev_info(&adapter->pdev->dev, "EEH resume\n");
3176
3177         pci_save_state(pdev);
3178
3179         /* tell fw we're ready to fire cmds */
3180         status = be_cmd_fw_init(adapter);
3181         if (status)
3182                 goto err;
3183
3184         status = be_setup(adapter);
3185         if (status)
3186                 goto err;
3187
3188         if (netif_running(netdev)) {
3189                 status = be_open(netdev);
3190                 if (status)
3191                         goto err;
3192         }
3193         netif_device_attach(netdev);
3194         return;
3195 err:
3196         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3197 }
3198
3199 static struct pci_error_handlers be_eeh_handlers = {
3200         .error_detected = be_eeh_err_detected,
3201         .slot_reset = be_eeh_reset,
3202         .resume = be_eeh_resume,
3203 };
3204
3205 static struct pci_driver be_driver = {
3206         .name = DRV_NAME,
3207         .id_table = be_dev_ids,
3208         .probe = be_probe,
3209         .remove = be_remove,
3210         .suspend = be_suspend,
3211         .resume = be_resume,
3212         .shutdown = be_shutdown,
3213         .err_handler = &be_eeh_handlers
3214 };
3215
3216 static int __init be_init_module(void)
3217 {
3218         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3219             rx_frag_size != 2048) {
3220                 printk(KERN_WARNING DRV_NAME
3221                         " : Module param rx_frag_size must be 2048/4096/8192."
3222                         " Using 2048\n");
3223                 rx_frag_size = 2048;
3224         }
3225
3226         if (num_vfs > 32) {
3227                 printk(KERN_WARNING DRV_NAME
3228                         " : Module param num_vfs must not be greater than 32."
3229                         " Using 32\n");
3230                 num_vfs = 32;
3231         }
3232
3233         return pci_register_driver(&be_driver);
3234 }
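/*
 * Sketch: the rx_frag_size check above accepts exactly 2048/4096/8192;
 * an equivalent formulation is "a power of two in [2048, 8192]". The
 * helper name is illustrative, not part of the driver.
 */
static bool rx_frag_size_valid(unsigned int sz)
{
        return sz >= 2048 && sz <= 8192 && (sz & (sz - 1)) == 0;
}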
3235 module_init(be_init_module);
3236
3237 static void __exit be_exit_module(void)
3238 {
3239         pci_unregister_driver(&be_driver);
3240 }
3241 module_exit(be_exit_module);