be2net: Stats for Lancer
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}
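
/* Usage sketch: be_queue_alloc(adapter, q, 256, 16) would back a ring of
 * 256 sixteen-byte entries with one 4KB DMA-coherent, zeroed buffer.
 * be_queue_free() can be called unconditionally on teardown; it checks
 * mem->va first, so freeing a never-allocated queue is a no-op.
 */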

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        /* Don't touch the device at all once an EEH error is pending */
        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
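
/* The be_*_notify() helpers below ring the adapter's doorbell registers.
 * Assuming the DB_* masks/shifts from be.h keep their apparent meaning,
 * the encoding is roughly:
 *
 *        val = (qid & RING_ID_MASK) | (count << NUM_POSTED_SHIFT);
 *        iowrite32(val, adapter->db + ring_offset);
 *
 * The wmb() before the rxq/txq doorbell writes orders the descriptor
 * updates in memory ahead of the doorbell, so the device never sees a
 * doorbell for descriptors it could still read stale.
 */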

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}
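
/* The populate_*_stats() helpers below copy the firmware's stats buffer,
 * whose layout differs per ASIC generation (v0 for BE2, v1 for BE3,
 * pport stats for Lancer), into the single driver-private format in
 * struct be_drv_stats that the rest of the driver consumes.
 */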

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events =
                        rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events =
                        rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop =
                port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
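
/* Lancer reports 64-bit counters as split 32-bit halves; make_64bit_val()
 * (assumed to be ((u64)hi << 32) | lo, matching its (hi, lo) usage below)
 * reassembles them.  Counters that Lancer does not expose are zeroed so
 * the common stats code always sees defined values.
 */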

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                                 pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                                pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                        make_64bit_val(pport_stats->rx_control_frames_hi,
                        pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                                        pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                                pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                        pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                                pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                                pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                                pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
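
/* Worked example for the adaptive EQ-delay math above: at ~880K rx
 * frags/sec, eqd = (880000 / 110000) << 3 = 64, which is then clamped
 * to [min_eqd, max_eqd]; anything below 10 is forced to 0 (no delay).
 */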

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
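
/* Example: 250,000,000 bytes over 2*HZ ticks -> 125,000,000 bytes/sec,
 * << 3 = 1,000,000,000 bits/sec, / 1000000 = 1000 Mbits/sec.  Callers
 * must pass at least HZ ticks, or the first divisor (ticks / HZ) is 0.
 */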

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
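
/* Example: an skb with linear data plus two page frags needs
 * 1 (hdr WRB) + 1 (linear) + 2 (frags) = 4 WRBs; the count is already
 * even, so no dummy is added.  A 3-WRB skb gets one dummy on BE2/BE3,
 * which require an even number of WRBs per request; Lancer does not.
 */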

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
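
/* AMAP_SET_BITS(), used above, is the driver's accessor for hardware
 * bit-fields: it writes a value into the named field of the WRB at its
 * hardware-defined bit offset (per the amap_* layouts in the be2net
 * headers), keeping this code independent of word boundaries.
 */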

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
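
/* On a DMA mapping failure above, the queue head is rewound to map_head
 * (the slot just after the reserved header WRB) and the loop re-walks
 * the WRBs already filled, unmapping each; only the first WRB can hold
 * a dma_map_single() mapping, hence map_single is cleared after one pass.
 */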

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}
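
/* Example: with VLANs 5 and 7 configured, the loop above builds
 * vtag[] = { cpu_to_le16(5), cpu_to_le16(7) } with ntags = 2 and
 * programs just those filters; once vlans_added exceeds max_vlans, the
 * NULL/0 call instead asks firmware for vlan promiscuous mode.
 */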

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
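
/* __GFP_COMP matters here: the rx path hands fragments of this page to
 * the stack and takes per-fragment references via get_page()/put_page(),
 * which is only safe on a multi-order allocation when it is a compound
 * page.
 */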
1380
1381 /*
1382  * Allocate a page, split it to fragments of size rx_frag_size and post as
1383  * receive buffers to BE
1384  */
1385 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1386 {
1387         struct be_adapter *adapter = rxo->adapter;
1388         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1389         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1390         struct be_queue_info *rxq = &rxo->q;
1391         struct page *pagep = NULL;
1392         struct be_eth_rx_d *rxd;
1393         u64 page_dmaaddr = 0, frag_dmaaddr;
1394         u32 posted, page_offset = 0;
1395
1396         page_info = &rxo->page_info_tbl[rxq->head];
1397         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1398                 if (!pagep) {
1399                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1400                         if (unlikely(!pagep)) {
1401                                 rxo->stats.rx_post_fail++;
1402                                 break;
1403                         }
1404                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1405                                                     0, adapter->big_page_size,
1406                                                     DMA_FROM_DEVICE);
1407                         page_info->page_offset = 0;
1408                 } else {
1409                         get_page(pagep);
1410                         page_info->page_offset = page_offset + rx_frag_size;
1411                 }
1412                 page_offset = page_info->page_offset;
1413                 page_info->page = pagep;
1414                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1415                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1416
1417                 rxd = queue_head_node(rxq);
1418                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1419                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1420
1421                 /* Any space left in the current big page for another frag? */
1422                 if ((page_offset + rx_frag_size + rx_frag_size) >
1423                                         adapter->big_page_size) {
1424                         pagep = NULL;
1425                         page_info->last_page_user = true;
1426                 }
1427
1428                 prev_page_info = page_info;
1429                 queue_head_inc(rxq);
1430                 page_info = &page_info_tbl[rxq->head];
1431         }
1432         if (pagep)
1433                 prev_page_info->last_page_user = true;
1434
1435         if (posted) {
1436                 atomic_add(posted, &rxq->used);
1437                 be_rxq_notify(adapter, rxq->id, posted);
1438         } else if (atomic_read(&rxq->used) == 0) {
1439                 /* Let be_worker replenish when memory is available */
1440                 rxo->rx_post_starved = true;
1441         }
1442 }
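
/*
 * Editorial sketch: with the default rx_frag_size of 2048 and a 4K
 * PAGE_SIZE, big_page_size is (1 << get_order(2048)) * 4096 = 4096,
 * so each page yields two frags: page_offset steps 0 -> 2048 and
 * last_page_user fires on the second frag since 2048 + 2*2048 > 4096.
 * The "space left?" test above reduces to the hypothetical helper
 * below. (Separately, the dma_map_page() return value is not checked
 * above; a dma_mapping_error() test before posting would be a
 * reasonable hardening step -- an assumption here, not something this
 * version of the driver does.)
 */
static inline bool example_page_exhausted(u32 page_offset, u32 frag_size,
                                          u32 big_page_size)
{
        /* true when no room remains for one more frag after this one */
        return page_offset + 2 * frag_size > big_page_size;
}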
1443
1444 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1445 {
1446         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1447
1448         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1449                 return NULL;
1450
1451         rmb();
1452         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1453
1454         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1455
1456         queue_tail_inc(tx_cq);
1457         return txcp;
1458 }
1459
1460 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1461 {
1462         struct be_queue_info *txq = &adapter->tx_obj.q;
1463         struct be_eth_wrb *wrb;
1464         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1465         struct sk_buff *sent_skb;
1466         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1467         bool unmap_skb_hdr = true;
1468
1469         sent_skb = sent_skbs[txq->tail];
1470         BUG_ON(!sent_skb);
1471         sent_skbs[txq->tail] = NULL;
1472
1473         /* skip header wrb */
1474         queue_tail_inc(txq);
1475
1476         do {
1477                 cur_index = txq->tail;
1478                 wrb = queue_tail_node(txq);
1479                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1480                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1481                 unmap_skb_hdr = false;
1482
1483                 num_wrbs++;
1484                 queue_tail_inc(txq);
1485         } while (cur_index != last_index);
1486
1487         kfree_skb(sent_skb);
1488         return num_wrbs;
1489 }
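
/*
 * Editorial sketch: the reclaim loop above frees the header wrb plus
 * every data wrb up to and including last_index. The count it returns
 * can equivalently be derived from ring arithmetic (hypothetical
 * helper; hdr_index is where the skb's header wrb sat):
 */
static inline u16 example_wrbs_reclaimed(u16 hdr_index, u16 last_index,
                                         u16 qlen)
{
        /* +1 counts the wrb at last_index itself; modulo handles wrap */
        return (last_index - hdr_index + qlen) % qlen + 1;
}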
1490
1491 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1492 {
1493         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1494
1495         if (!eqe->evt)
1496                 return NULL;
1497
1498         rmb();
1499         eqe->evt = le32_to_cpu(eqe->evt);
1500         queue_tail_inc(&eq_obj->q);
1501         return eqe;
1502 }
1503
1504 static int event_handle(struct be_adapter *adapter,
1505                         struct be_eq_obj *eq_obj)
1506 {
1507         struct be_eq_entry *eqe;
1508         u16 num = 0;
1509
1510         while ((eqe = event_get(eq_obj)) != NULL) {
1511                 eqe->evt = 0;
1512                 num++;
1513         }
1514
1515         /* Deal with any spurious interrupts that come
1516          * without events
1517          */
1518         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1519         if (num)
1520                 napi_schedule(&eq_obj->napi);
1521
1522         return num;
1523 }
1524
1525 /* Just read and notify events without processing them.
1526  * Used at the time of destroying event queues */
1527 static void be_eq_clean(struct be_adapter *adapter,
1528                         struct be_eq_obj *eq_obj)
1529 {
1530         struct be_eq_entry *eqe;
1531         u16 num = 0;
1532
1533         while ((eqe = event_get(eq_obj)) != NULL) {
1534                 eqe->evt = 0;
1535                 num++;
1536         }
1537
1538         if (num)
1539                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1540 }
1541
1542 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1543 {
1544         struct be_rx_page_info *page_info;
1545         struct be_queue_info *rxq = &rxo->q;
1546         struct be_queue_info *rx_cq = &rxo->cq;
1547         struct be_rx_compl_info *rxcp;
1548         u16 tail;
1549
1550         /* First cleanup pending rx completions */
1551         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1552                 be_rx_compl_discard(adapter, rxo, rxcp);
1553                 be_cq_notify(adapter, rx_cq->id, false, 1);
1554         }
1555
1556         /* Then free posted rx buffers that were not used */
1557         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1558         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1559                 page_info = get_rx_page_info(adapter, rxo, tail);
1560                 put_page(page_info->page);
1561                 memset(page_info, 0, sizeof(*page_info));
1562         }
1563         BUG_ON(atomic_read(&rxq->used));
1564 }
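
/*
 * Editorial sketch of the tail computation above: the 'used' entries
 * sit immediately behind 'head', so the oldest posted buffer lives at
 * (head + len - used) % len. For head = 3, used = 5, len = 1024 that
 * gives 1022, and the cleanup walks 1022, 1023, 0, 1, 2.
 */
static inline u16 example_oldest_posted(u16 head, u16 used, u16 len)
{
        return (head + len - used) % len;       /* hypothetical helper */
}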
1565
1566 static void be_tx_compl_clean(struct be_adapter *adapter)
1567 {
1568         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1569         struct be_queue_info *txq = &adapter->tx_obj.q;
1570         struct be_eth_tx_compl *txcp;
1571         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1572         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1573         struct sk_buff *sent_skb;
1574         bool dummy_wrb;
1575
1576         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1577         do {
1578                 while ((txcp = be_tx_compl_get(tx_cq))) {
1579                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1580                                         wrb_index, txcp);
1581                         num_wrbs += be_tx_compl_process(adapter, end_idx);
1582                         cmpl++;
1583                 }
1584                 if (cmpl) {
1585                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1586                         atomic_sub(num_wrbs, &txq->used);
1587                         cmpl = 0;
1588                         num_wrbs = 0;
1589                 }
1590
1591                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1592                         break;
1593
1594                 mdelay(1);
1595         } while (true);
1596
1597         if (atomic_read(&txq->used))
1598                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1599                         atomic_read(&txq->used));
1600
1601         /* free posted tx for which compls will never arrive */
1602         while (atomic_read(&txq->used)) {
1603                 sent_skb = sent_skbs[txq->tail];
1604                 end_idx = txq->tail;
1605                 index_adv(&end_idx,
1606                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1607                         txq->len);
1608                 num_wrbs = be_tx_compl_process(adapter, end_idx);
1609                 atomic_sub(num_wrbs, &txq->used);
1610         }
1611 }
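
/*
 * Editorial sketch: the 200ms bound above is the usual poll-with-
 * timeout idiom, restated generically below (hypothetical helper;
 * the driver additionally reaps completions inside its loop):
 */
static inline bool example_wait_drained(atomic_t *used, int max_ms)
{
        int waited;

        for (waited = 0; atomic_read(used) && waited < max_ms; waited++)
                mdelay(1);
        return atomic_read(used) == 0;          /* true if fully drained */
}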
1612
1613 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1614 {
1615         struct be_queue_info *q;
1616
1617         q = &adapter->mcc_obj.q;
1618         if (q->created)
1619                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1620         be_queue_free(adapter, q);
1621
1622         q = &adapter->mcc_obj.cq;
1623         if (q->created)
1624                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1625         be_queue_free(adapter, q);
1626 }
1627
1628 /* Must be called only after TX qs are created as MCC shares TX EQ */
1629 static int be_mcc_queues_create(struct be_adapter *adapter)
1630 {
1631         struct be_queue_info *q, *cq;
1632
1633         /* Alloc MCC compl queue */
1634         cq = &adapter->mcc_obj.cq;
1635         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1636                         sizeof(struct be_mcc_compl)))
1637                 goto err;
1638
1639         /* Ask BE to create MCC compl queue; share TX's eq */
1640         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1641                 goto mcc_cq_free;
1642
1643         /* Alloc MCC queue */
1644         q = &adapter->mcc_obj.q;
1645         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1646                 goto mcc_cq_destroy;
1647
1648         /* Ask BE to create MCC queue */
1649         if (be_cmd_mccq_create(adapter, q, cq))
1650                 goto mcc_q_free;
1651
1652         return 0;
1653
1654 mcc_q_free:
1655         be_queue_free(adapter, q);
1656 mcc_cq_destroy:
1657         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1658 mcc_cq_free:
1659         be_queue_free(adapter, cq);
1660 err:
1661         return -1;
1662 }
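
/* Editorial note: the ladder above is the kernel's standard unwind
 * idiom -- labels are ordered so each failure point jumps to the label
 * that undoes exactly what was set up before it:
 *      alloc cq   fails -> err
 *      create cq  fails -> mcc_cq_free
 *      alloc q    fails -> mcc_cq_destroy
 *      create q   fails -> mcc_q_free
 * so teardown always runs in reverse order of construction.
 */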
1663
1664 static void be_tx_queues_destroy(struct be_adapter *adapter)
1665 {
1666         struct be_queue_info *q;
1667
1668         q = &adapter->tx_obj.q;
1669         if (q->created)
1670                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1671         be_queue_free(adapter, q);
1672
1673         q = &adapter->tx_obj.cq;
1674         if (q->created)
1675                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676         be_queue_free(adapter, q);
1677
1678         /* Clear any residual events */
1679         be_eq_clean(adapter, &adapter->tx_eq);
1680
1681         q = &adapter->tx_eq.q;
1682         if (q->created)
1683                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1684         be_queue_free(adapter, q);
1685 }
1686
1687 static int be_tx_queues_create(struct be_adapter *adapter)
1688 {
1689         struct be_queue_info *eq, *q, *cq;
1690
1691         adapter->tx_eq.max_eqd = 0;
1692         adapter->tx_eq.min_eqd = 0;
1693         adapter->tx_eq.cur_eqd = 96;
1694         adapter->tx_eq.enable_aic = false;
1695         /* Alloc Tx Event queue */
1696         eq = &adapter->tx_eq.q;
1697         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1698                 return -1;
1699
1700         /* Ask BE to create Tx Event queue */
1701         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1702                 goto tx_eq_free;
1703
1704         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1705
1706
1707         /* Alloc TX eth compl queue */
1708         cq = &adapter->tx_obj.cq;
1709         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1710                         sizeof(struct be_eth_tx_compl)))
1711                 goto tx_eq_destroy;
1712
1713         /* Ask BE to create Tx eth compl queue */
1714         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1715                 goto tx_cq_free;
1716
1717         /* Alloc TX eth queue */
1718         q = &adapter->tx_obj.q;
1719         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1720                 goto tx_cq_destroy;
1721
1722         /* Ask BE to create Tx eth queue */
1723         if (be_cmd_txq_create(adapter, q, cq))
1724                 goto tx_q_free;
1725         return 0;
1726
1727 tx_q_free:
1728         be_queue_free(adapter, q);
1729 tx_cq_destroy:
1730         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1731 tx_cq_free:
1732         be_queue_free(adapter, cq);
1733 tx_eq_destroy:
1734         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1735 tx_eq_free:
1736         be_queue_free(adapter, eq);
1737         return -1;
1738 }
1739
1740 static void be_rx_queues_destroy(struct be_adapter *adapter)
1741 {
1742         struct be_queue_info *q;
1743         struct be_rx_obj *rxo;
1744         int i;
1745
1746         for_all_rx_queues(adapter, rxo, i) {
1747                 q = &rxo->q;
1748                 if (q->created) {
1749                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1750                         /* After the rxq is invalidated, wait for a grace time
1751                          * of 1ms for all dma to end and the flush compl to
1752                          * arrive
1753                          */
1754                         mdelay(1);
1755                         be_rx_q_clean(adapter, rxo);
1756                 }
1757                 be_queue_free(adapter, q);
1758
1759                 q = &rxo->cq;
1760                 if (q->created)
1761                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1762                 be_queue_free(adapter, q);
1763
1764                 /* Clear any residual events */
1765                 q = &rxo->rx_eq.q;
1766                 if (q->created) {
1767                         be_eq_clean(adapter, &rxo->rx_eq);
1768                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1769                 }
1770                 be_queue_free(adapter, q);
1771         }
1772 }
1773
1774 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1775 {
1776         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1777                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1778                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1779         } else {
1780                 dev_warn(&adapter->pdev->dev,
1781                         "No support for multiple RX queues\n");
1782                 return 1;
1783         }
1784 }
1785
1786 static int be_rx_queues_create(struct be_adapter *adapter)
1787 {
1788         struct be_queue_info *eq, *q, *cq;
1789         struct be_rx_obj *rxo;
1790         int rc, i;
1791
1792         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1793                                 msix_enabled(adapter) ?
1794                                         adapter->num_msix_vec - 1 : 1);
1795         if (adapter->num_rx_qs != MAX_RX_QS)
1796                 dev_warn(&adapter->pdev->dev,
1797                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1798
1799         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1800         for_all_rx_queues(adapter, rxo, i) {
1801                 rxo->adapter = adapter;
1802                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1803                 rxo->rx_eq.enable_aic = true;
1804
1805                 /* EQ */
1806                 eq = &rxo->rx_eq.q;
1807                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1808                                         sizeof(struct be_eq_entry));
1809                 if (rc)
1810                         goto err;
1811
1812                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1813                 if (rc)
1814                         goto err;
1815
1816                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1817
1818                 /* CQ */
1819                 cq = &rxo->cq;
1820                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1821                                 sizeof(struct be_eth_rx_compl));
1822                 if (rc)
1823                         goto err;
1824
1825                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1826                 if (rc)
1827                         goto err;
1828                 /* Rx Q */
1829                 q = &rxo->q;
1830                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1831                                 sizeof(struct be_eth_rx_d));
1832                 if (rc)
1833                         goto err;
1834
1835                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1836                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1837                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1838                 if (rc)
1839                         goto err;
1840         }
1841
1842         if (be_multi_rxq(adapter)) {
1843                 u8 rsstable[MAX_RSS_QS];
1844
1845                 for_all_rss_queues(adapter, rxo, i)
1846                         rsstable[i] = rxo->rss_id;
1847
1848                 rc = be_cmd_rss_config(adapter, rsstable,
1849                         adapter->num_rx_qs - 1);
1850                 if (rc)
1851                         goto err;
1852         }
1853
1854         return 0;
1855 err:
1856         be_rx_queues_destroy(adapter);
1857         return -1;
1858 }
1859
1860 static bool event_peek(struct be_eq_obj *eq_obj)
1861 {
1862         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1863         if (!eqe->evt)
1864                 return false;
1865         else
1866                 return true;
1867 }
1868
1869 static irqreturn_t be_intx(int irq, void *dev)
1870 {
1871         struct be_adapter *adapter = dev;
1872         struct be_rx_obj *rxo;
1873         int isr, i, tx = 0, rx = 0;
1874
1875         if (lancer_chip(adapter)) {
1876                 if (event_peek(&adapter->tx_eq))
1877                         tx = event_handle(adapter, &adapter->tx_eq);
1878                 for_all_rx_queues(adapter, rxo, i) {
1879                         if (event_peek(&rxo->rx_eq))
1880                                 rx |= event_handle(adapter, &rxo->rx_eq);
1881                 }
1882
1883                 if (!(tx || rx))
1884                         return IRQ_NONE;
1885
1886         } else {
1887                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1888                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1889                 if (!isr)
1890                         return IRQ_NONE;
1891
1892                 if ((1 << adapter->tx_eq.eq_idx & isr))
1893                         event_handle(adapter, &adapter->tx_eq);
1894
1895                 for_all_rx_queues(adapter, rxo, i) {
1896                         if ((1 << rxo->rx_eq.eq_idx & isr))
1897                                 event_handle(adapter, &rxo->rx_eq);
1898                 }
1899         }
1900
1901         return IRQ_HANDLED;
1902 }
1903
1904 static irqreturn_t be_msix_rx(int irq, void *dev)
1905 {
1906         struct be_rx_obj *rxo = dev;
1907         struct be_adapter *adapter = rxo->adapter;
1908
1909         event_handle(adapter, &rxo->rx_eq);
1910
1911         return IRQ_HANDLED;
1912 }
1913
1914 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1915 {
1916         struct be_adapter *adapter = dev;
1917
1918         event_handle(adapter, &adapter->tx_eq);
1919
1920         return IRQ_HANDLED;
1921 }
1922
1923 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1924 {
1925         return rxcp->tcpf && !rxcp->err;
1926 }
1927
1928 static int be_poll_rx(struct napi_struct *napi, int budget)
1929 {
1930         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1931         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1932         struct be_adapter *adapter = rxo->adapter;
1933         struct be_queue_info *rx_cq = &rxo->cq;
1934         struct be_rx_compl_info *rxcp;
1935         u32 work_done;
1936
1937         rxo->stats.rx_polls++;
1938         for (work_done = 0; work_done < budget; work_done++) {
1939                 rxcp = be_rx_compl_get(rxo);
1940                 if (!rxcp)
1941                         break;
1942
1943                 /* Ignore flush completions */
1944                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1945                         if (do_gro(rxcp))
1946                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1947                         else
1948                                 be_rx_compl_process(adapter, rxo, rxcp);
1949                 } else if (rxcp->pkt_size == 0) {
1950                         be_rx_compl_discard(adapter, rxo, rxcp);
1951                 }
1952
1953                 be_rx_stats_update(rxo, rxcp);
1954         }
1955
1956         /* Refill the queue */
1957         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1958                 be_post_rx_frags(rxo, GFP_ATOMIC);
1959
1960         /* All consumed */
1961         if (work_done < budget) {
1962                 napi_complete(napi);
1963                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1964         } else {
1965                 /* More to be consumed; continue with interrupts disabled */
1966                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1967         }
1968         return work_done;
1969 }
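
/*
 * Editorial sketch of the NAPI contract applied above: a poller that
 * consumes less than its budget must napi_complete() and re-arm the
 * interrupt; one that exhausts the budget leaves interrupts masked so
 * the softirq polls it again (hypothetical helper):
 */
static inline bool example_napi_finish(struct napi_struct *napi,
                                       int work_done, int budget)
{
        bool done = work_done < budget;

        if (done)
                napi_complete(napi);    /* drained: allow re-arming */
        return done;                    /* caller arms the CQ accordingly */
}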
1970
1971 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1972  * For TX/MCC we don't honour budget; consume everything
1973  */
1974 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1975 {
1976         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1977         struct be_adapter *adapter =
1978                 container_of(tx_eq, struct be_adapter, tx_eq);
1979         struct be_queue_info *txq = &adapter->tx_obj.q;
1980         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1981         struct be_eth_tx_compl *txcp;
1982         int tx_compl = 0, mcc_compl, status = 0;
1983         u16 end_idx, num_wrbs = 0;
1984
1985         while ((txcp = be_tx_compl_get(tx_cq))) {
1986                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1987                                 wrb_index, txcp);
1988                 num_wrbs += be_tx_compl_process(adapter, end_idx);
1989                 tx_compl++;
1990         }
1991
1992         mcc_compl = be_process_mcc(adapter, &status);
1993
1994         napi_complete(napi);
1995
1996         if (mcc_compl) {
1997                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1998                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1999         }
2000
2001         if (tx_compl) {
2002                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
2003
2004                 atomic_sub(num_wrbs, &txq->used);
2005
2006                 /* As Tx wrbs have been freed up, wake up netdev queue if
2007                  * it was stopped due to lack of tx wrbs.
2008                  */
2009                 if (netif_queue_stopped(adapter->netdev) &&
2010                         atomic_read(&txq->used) < txq->len / 2) {
2011                         netif_wake_queue(adapter->netdev);
2012                 }
2013
2014                 tx_stats(adapter)->be_tx_events++;
2015                 tx_stats(adapter)->be_tx_compl += tx_compl;
2016         }
2017
2018         return 1;
2019 }
2020
2021 void be_detect_dump_ue(struct be_adapter *adapter)
2022 {
2023         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2024         u32 i;
2025
2026         pci_read_config_dword(adapter->pdev,
2027                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2028         pci_read_config_dword(adapter->pdev,
2029                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2030         pci_read_config_dword(adapter->pdev,
2031                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2032         pci_read_config_dword(adapter->pdev,
2033                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2034
2035         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2036         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2037
2038         if (ue_status_lo || ue_status_hi) {
2039                 adapter->ue_detected = true;
2040                 adapter->eeh_err = true;
2041                 dev_err(&adapter->pdev->dev, "Unrecoverable error (UE) detected\n");
2042         }
2043
2044         if (ue_status_lo) {
2045                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2046                         if (ue_status_lo & 1)
2047                                 dev_err(&adapter->pdev->dev,
2048                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2049                 }
2050         }
2051         if (ue_status_hi) {
2052                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2053                         if (ue_status_hi & 1)
2054                                 dev_err(&adapter->pdev->dev,
2055                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2056                 }
2057         }
2058
2059 }
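
/*
 * Editorial sketch of the bit-walk above: shift the unmasked status
 * word right once per iteration and report every set bit; the loop
 * ends as soon as no set bits remain. The bounds check is an addition
 * here -- the descriptor arrays hold 32 entries, matching the 32
 * status bits.
 */
static inline void example_dump_bits(u32 status, char **desc, int ndesc)
{
        int i;

        for (i = 0; status && i < ndesc; status >>= 1, i++)
                if (status & 1)
                        printk(KERN_ERR "bit %d: %s set\n", i, desc[i]);
}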
2060
2061 static void be_worker(struct work_struct *work)
2062 {
2063         struct be_adapter *adapter =
2064                 container_of(work, struct be_adapter, work.work);
2065         struct be_rx_obj *rxo;
2066         int i;
2067
2068         if (!adapter->ue_detected && !lancer_chip(adapter))
2069                 be_detect_dump_ue(adapter);
2070
2071         /* When interrupts are not yet enabled, just reap any pending
2072          * mcc completions */
2073         if (!netif_running(adapter->netdev)) {
2074                 int mcc_compl, status = 0;
2075
2076                 mcc_compl = be_process_mcc(adapter, &status);
2077
2078                 if (mcc_compl) {
2079                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2080                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2081                 }
2082
2083                 goto reschedule;
2084         }
2085
2086         if (!adapter->stats_cmd_sent) {
2087                 if (lancer_chip(adapter))
2088                         lancer_cmd_get_pport_stats(adapter,
2089                                                 &adapter->stats_cmd);
2090                 else
2091                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2092         }
2093         be_tx_rate_update(adapter);
2094
2095         for_all_rx_queues(adapter, rxo, i) {
2096                 be_rx_rate_update(rxo);
2097                 be_rx_eqd_update(adapter, rxo);
2098
2099                 if (rxo->rx_post_starved) {
2100                         rxo->rx_post_starved = false;
2101                         be_post_rx_frags(rxo, GFP_KERNEL);
2102                 }
2103         }
2104
2105 reschedule:
2106         adapter->work_counter++;
2107         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2108 }
2109
2110 static void be_msix_disable(struct be_adapter *adapter)
2111 {
2112         if (msix_enabled(adapter)) {
2113                 pci_disable_msix(adapter->pdev);
2114                 adapter->num_msix_vec = 0;
2115         }
2116 }
2117
2118 static void be_msix_enable(struct be_adapter *adapter)
2119 {
2120 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2121         int i, status, num_vec;
2122
2123         num_vec = be_num_rxqs_want(adapter) + 1;
2124
2125         for (i = 0; i < num_vec; i++)
2126                 adapter->msix_entries[i].entry = i;
2127
2128         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2129         if (status == 0) {
2130                 goto done;
2131         } else if (status >= BE_MIN_MSIX_VECTORS) {
2132                 num_vec = status;
2133                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2134                                 num_vec) == 0)
2135                         goto done;
2136         }
2137         return;
2138 done:
2139         adapter->num_msix_vec = num_vec;
2140         return;
2141 }
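
/*
 * Editorial note: with this era's pci_enable_msix(), a positive return
 * is the vector count the platform can actually grant, so the function
 * above retries once with that smaller count and otherwise leaves
 * num_msix_vec at 0 (INTx fallback). A condensed restatement
 * (hypothetical helper):
 */
static inline int example_msix_retry(struct pci_dev *pdev,
                                     struct msix_entry *entries,
                                     int want, int min)
{
        int rc = pci_enable_msix(pdev, entries, want);

        if (rc == 0)
                return want;            /* granted everything requested */
        if (rc >= min && pci_enable_msix(pdev, entries, rc) == 0)
                return rc;              /* settled for what was offered */
        return -ENOSPC;                 /* caller falls back to INTx */
}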
2142
2143 static void be_sriov_enable(struct be_adapter *adapter)
2144 {
2145         be_check_sriov_fn_type(adapter);
2146 #ifdef CONFIG_PCI_IOV
2147         if (be_physfn(adapter) && num_vfs) {
2148                 int status, pos;
2149                 u16 nvfs;
2150
2151                 pos = pci_find_ext_capability(adapter->pdev,
2152                                                 PCI_EXT_CAP_ID_SRIOV);
2153                 pci_read_config_word(adapter->pdev,
2154                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2155
2156                 if (num_vfs > nvfs) {
2157                         dev_info(&adapter->pdev->dev,
2158                                         "Device supports only %d VFs, not %d\n",
2159                                         nvfs, num_vfs);
2160                         num_vfs = nvfs;
2161                 }
2162
2163                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2164         adapter->sriov_enabled = (status == 0);
2165         }
2166 #endif
2167 }
2168
2169 static void be_sriov_disable(struct be_adapter *adapter)
2170 {
2171 #ifdef CONFIG_PCI_IOV
2172         if (adapter->sriov_enabled) {
2173                 pci_disable_sriov(adapter->pdev);
2174                 adapter->sriov_enabled = false;
2175         }
2176 #endif
2177 }
2178
2179 static inline int be_msix_vec_get(struct be_adapter *adapter,
2180                                         struct be_eq_obj *eq_obj)
2181 {
2182         return adapter->msix_entries[eq_obj->eq_idx].vector;
2183 }
2184
2185 static int be_request_irq(struct be_adapter *adapter,
2186                 struct be_eq_obj *eq_obj,
2187                 void *handler, char *desc, void *context)
2188 {
2189         struct net_device *netdev = adapter->netdev;
2190         int vec;
2191
2192         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2193         vec = be_msix_vec_get(adapter, eq_obj);
2194         return request_irq(vec, handler, 0, eq_obj->desc, context);
2195 }
2196
2197 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2198                         void *context)
2199 {
2200         int vec = be_msix_vec_get(adapter, eq_obj);
2201         free_irq(vec, context);
2202 }
2203
2204 static int be_msix_register(struct be_adapter *adapter)
2205 {
2206         struct be_rx_obj *rxo;
2207         int status, i;
2208         char qname[10];
2209
2210         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2211                                 adapter);
2212         if (status)
2213                 goto err;
2214
2215         for_all_rx_queues(adapter, rxo, i) {
2216                 sprintf(qname, "rxq%d", i);
2217                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2218                                 qname, rxo);
2219                 if (status)
2220                         goto err_msix;
2221         }
2222
2223         return 0;
2224
2225 err_msix:
2226         be_free_irq(adapter, &adapter->tx_eq, adapter);
2227
2228         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2229                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2230
2231 err:
2232         dev_warn(&adapter->pdev->dev,
2233                 "MSIX Request IRQ failed - err %d\n", status);
2234         be_msix_disable(adapter);
2235         return status;
2236 }
2237
2238 static int be_irq_register(struct be_adapter *adapter)
2239 {
2240         struct net_device *netdev = adapter->netdev;
2241         int status;
2242
2243         if (msix_enabled(adapter)) {
2244                 status = be_msix_register(adapter);
2245                 if (status == 0)
2246                         goto done;
2247                 /* INTx is not supported for VF */
2248                 if (!be_physfn(adapter))
2249                         return status;
2250         }
2251
2252         /* INTx */
2253         netdev->irq = adapter->pdev->irq;
2254         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2255                         adapter);
2256         if (status) {
2257                 dev_err(&adapter->pdev->dev,
2258                         "INTx request IRQ failed - err %d\n", status);
2259                 return status;
2260         }
2261 done:
2262         adapter->isr_registered = true;
2263         return 0;
2264 }
2265
2266 static void be_irq_unregister(struct be_adapter *adapter)
2267 {
2268         struct net_device *netdev = adapter->netdev;
2269         struct be_rx_obj *rxo;
2270         int i;
2271
2272         if (!adapter->isr_registered)
2273                 return;
2274
2275         /* INTx */
2276         if (!msix_enabled(adapter)) {
2277                 free_irq(netdev->irq, adapter);
2278                 goto done;
2279         }
2280
2281         /* MSIx */
2282         be_free_irq(adapter, &adapter->tx_eq, adapter);
2283
2284         for_all_rx_queues(adapter, rxo, i)
2285                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2286
2287 done:
2288         adapter->isr_registered = false;
2289 }
2290
2291 static int be_close(struct net_device *netdev)
2292 {
2293         struct be_adapter *adapter = netdev_priv(netdev);
2294         struct be_rx_obj *rxo;
2295         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2296         int vec, i;
2297
2298         be_async_mcc_disable(adapter);
2299
2300         netif_carrier_off(netdev);
2301         adapter->link_up = false;
2302
2303         if (!lancer_chip(adapter))
2304                 be_intr_set(adapter, false);
2305
2306         for_all_rx_queues(adapter, rxo, i)
2307                 napi_disable(&rxo->rx_eq.napi);
2308
2309         napi_disable(&tx_eq->napi);
2310
2311         if (lancer_chip(adapter)) {
2312                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2313                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2314                 for_all_rx_queues(adapter, rxo, i)
2315                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2316         }
2317
2318         if (msix_enabled(adapter)) {
2319                 vec = be_msix_vec_get(adapter, tx_eq);
2320                 synchronize_irq(vec);
2321
2322                 for_all_rx_queues(adapter, rxo, i) {
2323                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2324                         synchronize_irq(vec);
2325                 }
2326         } else {
2327                 synchronize_irq(netdev->irq);
2328         }
2329         be_irq_unregister(adapter);
2330
2331         /* Wait for all pending tx completions to arrive so that
2332          * all tx skbs are freed.
2333          */
2334         be_tx_compl_clean(adapter);
2335
2336         return 0;
2337 }
2338
2339 static int be_open(struct net_device *netdev)
2340 {
2341         struct be_adapter *adapter = netdev_priv(netdev);
2342         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2343         struct be_rx_obj *rxo;
2344         bool link_up;
2345         int status, i;
2346         u8 mac_speed;
2347         u16 link_speed;
2348
2349         for_all_rx_queues(adapter, rxo, i) {
2350                 be_post_rx_frags(rxo, GFP_KERNEL);
2351                 napi_enable(&rxo->rx_eq.napi);
2352         }
2353         napi_enable(&tx_eq->napi);
2354
2355         be_irq_register(adapter);
2356
2357         if (!lancer_chip(adapter))
2358                 be_intr_set(adapter, true);
2359
2360         /* The evt queues are created in unarmed state; arm them */
2361         for_all_rx_queues(adapter, rxo, i) {
2362                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2363                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2364         }
2365         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2366
2367         /* Now that interrupts are on we can process async mcc */
2368         be_async_mcc_enable(adapter);
2369
2370         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2371                         &link_speed, 0);
2372         if (status)
2373                 goto err;
2374         be_link_status_update(adapter, link_up);
2375
2376         if (be_physfn(adapter)) {
2377                 status = be_vid_config(adapter, false, 0);
2378                 if (status)
2379                         goto err;
2380
2381                 status = be_cmd_set_flow_control(adapter,
2382                                 adapter->tx_fc, adapter->rx_fc);
2383                 if (status)
2384                         goto err;
2385         }
2386
2387         return 0;
2388 err:
2389         be_close(adapter->netdev);
2390         return -EIO;
2391 }
2392
2393 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2394 {
2395         struct be_dma_mem cmd;
2396         int status = 0;
2397         u8 mac[ETH_ALEN];
2398
2399         memset(mac, 0, ETH_ALEN);
2400
2401         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2402         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2403                                     GFP_KERNEL);
2404         if (cmd.va == NULL)
2405                 return -1;
2406         memset(cmd.va, 0, cmd.size);
2407
2408         if (enable) {
2409                 status = pci_write_config_dword(adapter->pdev,
2410                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2411                 if (status) {
2412                         dev_err(&adapter->pdev->dev,
2413                                 "Could not enable Wake-on-LAN\n");
2414                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2415                                           cmd.dma);
2416                         return status;
2417                 }
2418                 status = be_cmd_enable_magic_wol(adapter,
2419                                 adapter->netdev->dev_addr, &cmd);
2420                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2421                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2422         } else {
2423                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2424                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2425                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2426         }
2427
2428         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2429         return status;
2430 }
2431
2432 /*
2433  * Generate a seed MAC address from the PF MAC Address using jhash.
2434  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2435  * These addresses are programmed in the ASIC by the PF and the VF driver
2436  * queries for the MAC address during its probe.
2437  */
2438 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2439 {
2440         u32 vf = 0;
2441         int status = 0;
2442         u8 mac[ETH_ALEN];
2443
2444         be_vf_eth_addr_generate(adapter, mac);
2445
2446         for (vf = 0; vf < num_vfs; vf++) {
2447                 status = be_cmd_pmac_add(adapter, mac,
2448                                         adapter->vf_cfg[vf].vf_if_handle,
2449                                         &adapter->vf_cfg[vf].vf_pmac_id,
2450                                         vf + 1);
2451                 if (status)
2452                         dev_err(&adapter->pdev->dev,
2453                                 "MAC address add failed for VF %d\n", vf);
2454                 else
2455                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2456
2457                 mac[5] += 1;
2458         }
2459         return status;
2460 }
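
/* Editorial note: mac[5] += 1 above bumps only the last octet, so it
 * would wrap without carrying beyond 256 VFs -- harmless for the VF
 * counts supported here. A carry-propagating variant, for reference
 * (hypothetical helper; stops at byte 3 to preserve the OUI):
 */
static inline void example_mac_inc(u8 mac[ETH_ALEN])
{
        int i;

        for (i = ETH_ALEN - 1; i >= 3; i--)
                if (++mac[i])
                        break;          /* no carry out of this octet */
}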
2461
2462 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2463 {
2464         u32 vf;
2465
2466         for (vf = 0; vf < num_vfs; vf++) {
2467                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2468                         be_cmd_pmac_del(adapter,
2469                                         adapter->vf_cfg[vf].vf_if_handle,
2470                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2471         }
2472 }
2473
2474 static int be_setup(struct be_adapter *adapter)
2475 {
2476         struct net_device *netdev = adapter->netdev;
2477         u32 cap_flags, en_flags, vf = 0;
2478         int status;
2479         u8 mac[ETH_ALEN];
2480
2481         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2482                                 BE_IF_FLAGS_BROADCAST |
2483                                 BE_IF_FLAGS_MULTICAST;
2484
2485         if (be_physfn(adapter)) {
2486                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2487                                 BE_IF_FLAGS_PROMISCUOUS |
2488                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2489                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2490
2491                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2492                         cap_flags |= BE_IF_FLAGS_RSS;
2493                         en_flags |= BE_IF_FLAGS_RSS;
2494                 }
2495         }
2496
2497         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2498                         netdev->dev_addr, false/* pmac_invalid */,
2499                         &adapter->if_handle, &adapter->pmac_id, 0);
2500         if (status != 0)
2501                 goto do_none;
2502
2503         if (be_physfn(adapter)) {
2504                 if (adapter->sriov_enabled) {
2505                         while (vf < num_vfs) {
2506                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2507                                                         BE_IF_FLAGS_BROADCAST;
2508                                 status = be_cmd_if_create(adapter, cap_flags,
2509                                         en_flags, mac, true,
2510                                         &adapter->vf_cfg[vf].vf_if_handle,
2511                                         NULL, vf+1);
2512                                 if (status) {
2513                                         dev_err(&adapter->pdev->dev,
2514                                         "Interface Create failed for VF %d\n",
2515                                         vf);
2516                                         goto if_destroy;
2517                                 }
2518                                 adapter->vf_cfg[vf].vf_pmac_id =
2519                                                         BE_INVALID_PMAC_ID;
2520                                 vf++;
2521                         }
2522                 }
2523         } else {
2524                 status = be_cmd_mac_addr_query(adapter, mac,
2525                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2526                 if (!status) {
2527                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2528                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2529                 }
2530         }
2531
2532         status = be_tx_queues_create(adapter);
2533         if (status != 0)
2534                 goto if_destroy;
2535
2536         status = be_rx_queues_create(adapter);
2537         if (status != 0)
2538                 goto tx_qs_destroy;
2539
2540         status = be_mcc_queues_create(adapter);
2541         if (status != 0)
2542                 goto rx_qs_destroy;
2543
2544         adapter->link_speed = -1;
2545
2546         return 0;
2547
2548 rx_qs_destroy:
2549         be_rx_queues_destroy(adapter);
2550 tx_qs_destroy:
2551         be_tx_queues_destroy(adapter);
2552 if_destroy:
2553         if (be_physfn(adapter) && adapter->sriov_enabled)
2554                 for (vf = 0; vf < num_vfs; vf++)
2555                         if (adapter->vf_cfg[vf].vf_if_handle)
2556                                 be_cmd_if_destroy(adapter,
2557                                         adapter->vf_cfg[vf].vf_if_handle,
2558                                         vf + 1);
2559         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2560 do_none:
2561         return status;
2562 }
2563
2564 static int be_clear(struct be_adapter *adapter)
2565 {
2566         int vf;
2567
2568         if (be_physfn(adapter) && adapter->sriov_enabled)
2569                 be_vf_eth_addr_rem(adapter);
2570
2571         be_mcc_queues_destroy(adapter);
2572         be_rx_queues_destroy(adapter);
2573         be_tx_queues_destroy(adapter);
2574         adapter->eq_next_idx = 0;
2575
2576         if (be_physfn(adapter) && adapter->sriov_enabled)
2577                 for (vf = 0; vf < num_vfs; vf++)
2578                         if (adapter->vf_cfg[vf].vf_if_handle)
2579                                 be_cmd_if_destroy(adapter,
2580                                         adapter->vf_cfg[vf].vf_if_handle,
2581                                         vf + 1);
2582
2583         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2584
2585         /* tell fw we're done with firing cmds */
2586         be_cmd_fw_clean(adapter);
2587         return 0;
2588 }
2589
2590
2591 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2592 static bool be_flash_redboot(struct be_adapter *adapter,
2593                         const u8 *p, u32 img_start, int image_size,
2594                         int hdr_size)
2595 {
2596         u32 crc_offset;
2597         u8 flashed_crc[4];
2598         int status;
2599
2600         crc_offset = hdr_size + img_start + image_size - 4;
2601
2602         p += crc_offset;
2603
2604         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2605                         (image_size - 4));
2606         if (status) {
2607                 dev_err(&adapter->pdev->dev,
2608                 "could not get crc from flash, not flashing redboot\n");
2609                 return false;
2610         }
2611
2612         /* update redboot only if crc does not match */
2613         if (!memcmp(flashed_crc, p, 4))
2614                 return false;
2615         else
2616                 return true;
2617 }
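
/* Editorial sketch of the offset math above: the stored CRC occupies
 * the final 4 bytes of the redboot component, so within the UFI file
 * it sits at hdr_size + img_start + image_size - 4; flashing proceeds
 * only when the CRC already on flash differs from those bytes.
 */
static inline u32 example_crc_offset(u32 hdr_size, u32 img_start,
                                     u32 image_size)
{
        return hdr_size + img_start + image_size - 4;   /* hypothetical */
}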
2618
2619 static int be_flash_data(struct be_adapter *adapter,
2620                         const struct firmware *fw,
2621                         struct be_dma_mem *flash_cmd, int num_of_images)
2623 {
2624         int status = 0, i, filehdr_size = 0;
2625         u32 total_bytes = 0, flash_op;
2626         int num_bytes;
2627         const u8 *p = fw->data;
2628         struct be_cmd_write_flashrom *req = flash_cmd->va;
2629         const struct flash_comp *pflashcomp;
2630         int num_comp;
2631
2632         static const struct flash_comp gen3_flash_types[9] = {
2633                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2634                         FLASH_IMAGE_MAX_SIZE_g3},
2635                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2636                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2637                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2638                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2639                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2640                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2641                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2642                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2644                         FLASH_IMAGE_MAX_SIZE_g3},
2645                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2646                         FLASH_IMAGE_MAX_SIZE_g3},
2647                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2648                         FLASH_IMAGE_MAX_SIZE_g3},
2649                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2650                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2651         };
2652         static const struct flash_comp gen2_flash_types[8] = {
2653                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2654                         FLASH_IMAGE_MAX_SIZE_g2},
2655                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2656                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2657                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2658                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2659                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2660                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2661                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2662                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2663                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2664                         FLASH_IMAGE_MAX_SIZE_g2},
2665                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2666                         FLASH_IMAGE_MAX_SIZE_g2},
2667                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2668                          FLASH_IMAGE_MAX_SIZE_g2}
2669         };
2670
2671         if (adapter->generation == BE_GEN3) {
2672                 pflashcomp = gen3_flash_types;
2673                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2674                 num_comp = ARRAY_SIZE(gen3_flash_types);
2675         } else {
2676                 pflashcomp = gen2_flash_types;
2677                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2678                 num_comp = ARRAY_SIZE(gen2_flash_types);
2679         }
2680         for (i = 0; i < num_comp; i++) {
2681                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2682                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2683                         continue;
2684                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2685                         (!be_flash_redboot(adapter, fw->data,
2686                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2687                         (num_of_images * sizeof(struct image_hdr)))))
2688                         continue;
2689                 p = fw->data;
2690                 p += filehdr_size + pflashcomp[i].offset
2691                         + (num_of_images * sizeof(struct image_hdr));
2692                 if (p + pflashcomp[i].size > fw->data + fw->size)
2693                         return -1;
2694                 total_bytes = pflashcomp[i].size;
2695                 while (total_bytes) {
2696                         if (total_bytes > 32*1024)
2697                                 num_bytes = 32*1024;
2698                         else
2699                                 num_bytes = total_bytes;
2700                         total_bytes -= num_bytes;
2701
2702                         if (!total_bytes)
2703                                 flash_op = FLASHROM_OPER_FLASH;
2704                         else
2705                                 flash_op = FLASHROM_OPER_SAVE;
2706                         memcpy(req->params.data_buf, p, num_bytes);
2707                         p += num_bytes;
2708                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2709                                 pflashcomp[i].optype, flash_op, num_bytes);
2710                         if (status) {
2711                                 dev_err(&adapter->pdev->dev,
2712                                         "cmd to write to flash rom failed.\n");
2713                                 return -1;
2714                         }
2715                         yield();
2716                 }
2717         }
2718         return 0;
2719 }
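
/* Editorial note on the chunking above: each component is written in
 * 32KB pieces; every chunk but the last is sent as FLASHROM_OPER_SAVE
 * (staged in the adapter) and the final chunk as FLASHROM_OPER_FLASH,
 * which commits the staged data. A 100KB component therefore goes out
 * as three 32KB SAVEs followed by a 4KB FLASH.
 */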
2720
2721 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2722 {
2723         if (fhdr == NULL)
2724                 return 0;
2725         if (fhdr->build[0] == '3')
2726                 return BE_GEN3;
2727         else if (fhdr->build[0] == '2')
2728                 return BE_GEN2;
2729         else
2730                 return 0;
2731 }
2732
2733 int be_load_fw(struct be_adapter *adapter, u8 *func)
2734 {
2735         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2736         const struct firmware *fw;
2737         struct flash_file_hdr_g2 *fhdr;
2738         struct flash_file_hdr_g3 *fhdr3;
2739         struct image_hdr *img_hdr_ptr = NULL;
2740         struct be_dma_mem flash_cmd;
2741         int status, i = 0, num_imgs = 0;
2742         const u8 *p;
2743
2744         if (!netif_running(adapter->netdev)) {
2745                 dev_err(&adapter->pdev->dev,
2746                         "Firmware load not allowed (interface is down)\n");
2747                 return -EPERM;
2748         }
2749
2750         strcpy(fw_file, func);
2751
2752         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2753         if (status)
2754                 goto fw_exit;
2755
2756         p = fw->data;
2757         fhdr = (struct flash_file_hdr_g2 *) p;
2758         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2759
2760         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2761         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2762                                           &flash_cmd.dma, GFP_KERNEL);
2763         if (!flash_cmd.va) {
2764                 status = -ENOMEM;
2765                 dev_err(&adapter->pdev->dev,
2766                         "Memory allocation failure while flashing\n");
2767                 goto fw_exit;
2768         }
2769
2770         if ((adapter->generation == BE_GEN3) &&
2771                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2772                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2773                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2774                 for (i = 0; i < num_imgs; i++) {
2775                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2776                                         (sizeof(struct flash_file_hdr_g3) +
2777                                          i * sizeof(struct image_hdr)));
2778                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2779                                 status = be_flash_data(adapter, fw, &flash_cmd,
2780                                                         num_imgs);
2781                 }
2782         } else if ((adapter->generation == BE_GEN2) &&
2783                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2784                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2785         } else {
2786                 dev_err(&adapter->pdev->dev,
2787                         "UFI and Interface are not compatible for flashing\n");
2788                 status = -1;
2789         }
2790
2791         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2792                           flash_cmd.dma);
2793         if (status) {
2794                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2795                 goto fw_exit;
2796         }
2797
2798         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2799
2800 fw_exit:
2801         release_firmware(fw);
2802         return status;
2803 }
2804
2805 static struct net_device_ops be_netdev_ops = {
2806         .ndo_open               = be_open,
2807         .ndo_stop               = be_close,
2808         .ndo_start_xmit         = be_xmit,
2809         .ndo_set_rx_mode        = be_set_multicast_list,
2810         .ndo_set_mac_address    = be_mac_addr_set,
2811         .ndo_change_mtu         = be_change_mtu,
2812         .ndo_validate_addr      = eth_validate_addr,
2813         .ndo_vlan_rx_register   = be_vlan_register,
2814         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2815         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2816         .ndo_set_vf_mac         = be_set_vf_mac,
2817         .ndo_set_vf_vlan        = be_set_vf_vlan,
2818         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2819         .ndo_get_vf_config      = be_get_vf_config
2820 };
2821
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (lancer_chip(adapter))
                netdev->vlan_features |= NETIF_F_TSO6;

        netdev->flags |= IFF_MULTICAST;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                                BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}

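/* Map the BARs this function needs. As encoded below: Lancer keeps its
 * doorbells in BAR 0; BE2 (GEN2) uses BAR 2 for CSR, BAR 1 for pcicfg and
 * BAR 4 for doorbells; BE3 (GEN3) uses BAR 0 for pcicfg and BAR 4 (PF) or
 * BAR 0 (VF) for doorbells, with the VF's pcicfg living at a fixed offset
 * inside the doorbell BAR.
 */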
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
                db_reg = 4;
        } else {
                pcicfg_reg = 0;
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(
                                pci_resource_start(adapter->pdev, pcicfg_reg),
                                pci_resource_len(adapter->pdev, pcicfg_reg));
                if (addr == NULL)
                        goto pci_map_err;
                adapter->pcicfg = addr;
        } else {
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
        }

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

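/* Set up everything needed to talk to firmware: map the BARs, carve out a
 * 16-byte-aligned mailbox (hence the +16 over-allocation), allocate the
 * multicast command buffer and initialize the mbox/MCC locks.
 */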
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
                                            mc_cmd_mem->size, &mc_cmd_mem->dma,
                                            GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

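/* Allocate the DMA buffer used by the stats firmware command; its size
 * depends on which command format the ASIC speaks: v0 on BE2, the PPORT
 * format on Lancer, v1 otherwise.
 */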
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (adapter->generation == BE_GEN2) {
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        } else {
                if (lancer_chip(adapter))
                        cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
                else
                        cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        }
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                     GFP_KERNEL);
        if (cmd->va == NULL)
                return -ENOMEM;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

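/* Undo be_probe() in roughly reverse order; tolerates a pdev that never
 * finished probing (drvdata == NULL).
 */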
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        kfree(adapter->vf_cfg);
        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

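/* Query firmware for per-function configuration: FW version, port number,
 * function mode/caps, the permanent MAC (PF only), the VLAN budget and
 * controller attributes.
 */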
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        if (be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

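        /* 0x400 in function_mode is presumably the FLEX10/multi-channel
         * flag (it is given a name in later driver versions); in that mode
         * this function owns only a quarter of the VLAN table.
         */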
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        be_cmd_check_native_mode(adapter);
        return 0;
}

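/* Derive the ASIC generation from the PCI device ID. Lancer (OC_DEVICE_ID3)
 * is additionally validated via its SLI_INTF register, and SR-IOV is
 * rejected there since this driver does not support Lancer VFs.
 */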
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                if (num_vfs > 0) {
                        dev_err(&pdev->dev, "VFs not supported\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

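/* Poll the SLIPORT status register until the RDY bit is set: 500 iterations
 * of msleep(20) gives the firmware roughly 10 seconds to become ready.
 */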
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(20);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -ETIMEDOUT;

        return status;
}

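/* If the port reports an error together with "reset needed", kick the SLI
 * port initialization via SLIPORT_CONTROL and re-poll; any error state that
 * remains after that is treated as fatal by the caller.
 */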
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check that the adapter has cleared the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -EIO;
                } else if (err || reset_needed) {
                        status = -EIO;
                }
        }
        return status;
}

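/* Main probe path. Ordering is significant: the BARs and the mailbox must
 * exist before the first firmware command (POST/fw_init/reset_function),
 * stats and config queries come next, and only then are MSI-X, the rings
 * and the netdev brought up. Each failure unwinds through the label chain
 * at the bottom.
 */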
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg) {
                        status = -ENOMEM;
                        goto free_netdev;
                }
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_vf_cfg;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                u8 mac_speed;
                bool link_up;
                u16 vf, lnk_speed;

                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;

                for (vf = 0; vf < num_vfs; vf++) {
                        status = be_cmd_link_status_query(adapter, &link_up,
                                        &mac_speed, &lnk_speed, vf + 1);
                        if (!status)
                                adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
                        else
                                goto unreg_netdev;
                }
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_vf_cfg:
        kfree(adapter->vf_cfg);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

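/* Legacy PM callbacks: suspend tears the rings and MSI-X down after saving
 * the negotiated flow-control state; resume redoes the firmware handshake
 * and be_setup() from scratch.
 */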
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * A function-level reset (FLR) stops the controller from DMAing any further
 * data, so reset it on shutdown once WoL (if requested) has been armed.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

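/* EEH/AER recovery: detach and tear down on error, re-POST the card on slot
 * reset, then rebuild the rings and reattach in the resume callback.
 */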
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

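/* Validate rx_frag_size before registering the driver: the RX rings only
 * support 2K/4K/8K fragments, so anything else falls back to the 2048
 * default with a warning.
 */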
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);