/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
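/*
 * Example (added for illustration; assumes the module is built as be2net,
 * its usual name in the kernel tree): the parameters above can be set at
 * load time, e.g.
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */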
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
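/*
 * Note (added for clarity): each *_notify helper above builds a 32-bit
 * doorbell word encoding the ring id, the number of entries posted or
 * consumed and, for EQs/CQs, the re-arm bit, then writes it to the queue's
 * doorbell offset in the adapter's doorbell BAR.
 */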
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
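/*
 * Worked example (added for clarity, using the scaling above): at
 * 1,760,000 rx frags/sec, eqd = (1760000 / 110000) << 3 = 128, which is
 * then clamped to the [min_eqd, max_eqd] range before being programmed.
 */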
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
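/*
 * Worked example (added for clarity): 250,000,000 bytes over 2 * HZ ticks
 * -> 125,000,000 bytes/sec -> 1,000,000,000 bits/sec -> 1000, i.e. the
 * returned rate is in Mbits/sec.
 */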
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
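/*
 * Worked example (added for clarity): an skb with linear data and two page
 * frags needs 1 + 2 data WRBs plus the header WRB = 4; the count is even,
 * so no dummy WRB is added (on non-Lancer chips).
 */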
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
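/*
 * Note (added for clarity): the queue is stopped above, *before* the
 * doorbell write, whenever fewer than BE_MAX_TX_FRAG_COUNT free WRBs
 * remain; be_poll_tx_mcc() wakes it once the ring drains below half.
 */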
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
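/*
 * Example (added for clarity; assumes BE_MAX_JUMBO_FRAME_SIZE is 9018 as
 * defined in be.h): the largest accepted MTU is 9018 - (14 + 4) = 9000,
 * i.e. the jumbo frame size minus the Ethernet header and FCS.
 */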
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}
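/*
 * Note (added for clarity): unlike be_rx_compl_process(), the GRO path
 * never copies a header into linear skb data; every fragment is attached
 * as a page and handed to napi_gro_frags()/vlan_gro_frags().
 */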
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
	rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
	rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	/* vlanf could be wrongly set in some cards. ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !rxcp->vtm)
		rxcp->vlanf = 0;

	if (!lancer_chip(adapter))
		rxcp->vid = swab16(rxcp->vid);

	if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
		rxcp->vlanf = 0;

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
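/*
 * Worked example (added for clarity): with 4K pages and the default
 * rx_frag_size of 2048, big_page_size is 4096 (see be_rx_queues_create),
 * so each page is carved into two 2048-byte fragments and the owner of
 * the second fragment is marked as the last page user.
 */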
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
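/* Note (added for clarity): the error labels above unwind in reverse order
 * of creation, so a failure at any step releases exactly what was set up.
 */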
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
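/*
 * Note (added for clarity): each RX object gets its own EQ -> CQ -> RXQ
 * chain; queue 0 is the default non-RSS queue, and queues 1..n-1 are the
 * RSS queues whose ids populate the indirection table built above.
 */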
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
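/*
 * Note (added for clarity): when the budget is exhausted the CQ is
 * notified without re-arming, so NAPI polls again instead of waiting for
 * a fresh interrupt; the CQ is re-armed only once a poll drains it.
 */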
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		if (!adapter->ue_detected && !lancer_chip(adapter))
			be_detect_dump_ue(adapter);

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
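/*
 * Note (added for clarity): one MSI-X vector is requested per RX queue
 * plus one shared TX/MCC vector; if fewer vectors are granted, the RX
 * queue count is shrunk to fit instead of immediately falling back to
 * INTx.
 */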
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;
mcc_q_destroy:
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;
	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};
2442 if (adapter->generation == BE_GEN3) {
2443 pflashcomp = gen3_flash_types;
2444 filehdr_size = sizeof(struct flash_file_hdr_g3);
2445 num_comp = ARRAY_SIZE(gen3_flash_types);
2447 pflashcomp = gen2_flash_types;
2448 filehdr_size = sizeof(struct flash_file_hdr_g2);
2449 num_comp = ARRAY_SIZE(gen2_flash_types);
2451 for (i = 0; i < num_comp; i++) {
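/*
 * Two components may be skipped below: the NCSI image when the running
 * firmware predates 3.102.148.0, and the redboot image when its
 * on-flash CRC already matches the new image.
 */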
2452 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2453 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2455 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2456 (!be_flash_redboot(adapter, fw->data,
2457 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2458 (num_of_images * sizeof(struct image_hdr)))))
2461 p += filehdr_size + pflashcomp[i].offset
2462 + (num_of_images * sizeof(struct image_hdr));
2463 if (p + pflashcomp[i].size > fw->data + fw->size)
2465 total_bytes = pflashcomp[i].size;
2466 while (total_bytes) {
2467 if (total_bytes > 32*1024)
2468 num_bytes = 32*1024;
2470 num_bytes = total_bytes;
2471 total_bytes -= num_bytes;
2474 flash_op = FLASHROM_OPER_FLASH;
2476 flash_op = FLASHROM_OPER_SAVE;
2477 memcpy(req->params.data_buf, p, num_bytes);
2479 status = be_cmd_write_flashrom(adapter, flash_cmd,
2480 pflashcomp[i].optype, flash_op, num_bytes);
2482 dev_err(&adapter->pdev->dev,
2483 "cmd to write to flash rom failed.\n");
2492 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2496 if (fhdr->build[0] == '3')
2498 else if (fhdr->build[0] == '2')
2504 int be_load_fw(struct be_adapter *adapter, u8 *func)
2506 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2507 const struct firmware *fw;
2508 struct flash_file_hdr_g2 *fhdr;
2509 struct flash_file_hdr_g3 *fhdr3;
2510 struct image_hdr *img_hdr_ptr = NULL;
2511 struct be_dma_mem flash_cmd;
2512 int status, i = 0, num_imgs = 0;
2515 if (!netif_running(adapter->netdev)) {
2516 dev_err(&adapter->pdev->dev,
2517 "Firmware load not allowed (interface is down)\n");
2521 strlcpy(fw_file, func, sizeof(fw_file));
2523 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2528 fhdr = (struct flash_file_hdr_g2 *) p;
2529 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2532 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2533 &flash_cmd.dma, GFP_KERNEL);
2534 if (!flash_cmd.va) {
2536 dev_err(&adapter->pdev->dev,
2537 "Memory allocation failure while flashing\n");
2541 if ((adapter->generation == BE_GEN3) &&
2542 (get_ufigen_type(fhdr) == BE_GEN3)) {
2543 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2544 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2545 for (i = 0; i < num_imgs; i++) {
2546 img_hdr_ptr = (struct image_hdr *) (fw->data +
2547 (sizeof(struct flash_file_hdr_g3) +
2548 i * sizeof(struct image_hdr)));
2549 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2550 status = be_flash_data(adapter, fw, &flash_cmd,
2553 } else if ((adapter->generation == BE_GEN2) &&
2554 (get_ufigen_type(fhdr) == BE_GEN2)) {
2555 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2557 dev_err(&adapter->pdev->dev,
2558 "UFI and Interface are not compatible for flashing\n");
2562 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2565 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2569 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2572 release_firmware(fw);
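/*
 * be_load_fw() is invoked through the ethtool flash interface (note
 * ETHTOOL_FLASH_MAX_FILENAME above). A typical invocation from
 * userspace, assuming the UFI file is visible to request_firmware():
 *
 *	ethtool -f ethX <firmware>.ufi
 */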
2576 static struct net_device_ops be_netdev_ops = {
2577 .ndo_open = be_open,
2578 .ndo_stop = be_close,
2579 .ndo_start_xmit = be_xmit,
2580 .ndo_set_rx_mode = be_set_multicast_list,
2581 .ndo_set_mac_address = be_mac_addr_set,
2582 .ndo_change_mtu = be_change_mtu,
2583 .ndo_validate_addr = eth_validate_addr,
2584 .ndo_vlan_rx_register = be_vlan_register,
2585 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2586 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2587 .ndo_set_vf_mac = be_set_vf_mac,
2588 .ndo_set_vf_vlan = be_set_vf_vlan,
2589 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2590 .ndo_get_vf_config = be_get_vf_config
2593 static void be_netdev_init(struct net_device *netdev)
2595 struct be_adapter *adapter = netdev_priv(netdev);
2596 struct be_rx_obj *rxo;
2599 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2600 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2601 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2602 NETIF_F_GRO | NETIF_F_TSO6;
2604 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2605 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607 if (lancer_chip(adapter))
2608 netdev->vlan_features |= NETIF_F_TSO6;
2610 netdev->flags |= IFF_MULTICAST;
2612 adapter->rx_csum = true;
2614 /* Default settings for Rx and Tx flow control */
2615 adapter->rx_fc = true;
2616 adapter->tx_fc = true;
2618 netif_set_gso_max_size(netdev, 65535);
2620 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2622 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2624 for_all_rx_queues(adapter, rxo, i)
2625 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2628 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
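/*
 * One NAPI context is registered per RX event queue; TX and MCC
 * completions share the single context on tx_eq.
 */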
2632 static void be_unmap_pci_bars(struct be_adapter *adapter)
2635 iounmap(adapter->csr);
2637 iounmap(adapter->db);
2638 if (adapter->pcicfg && be_physfn(adapter))
2639 iounmap(adapter->pcicfg);
2642 static int be_map_pci_bars(struct be_adapter *adapter)
2645 int pcicfg_reg, db_reg;
2647 if (lancer_chip(adapter)) {
2648 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2649 pci_resource_len(adapter->pdev, 0));
2656 if (be_physfn(adapter)) {
2657 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2658 pci_resource_len(adapter->pdev, 2));
2661 adapter->csr = addr;
2664 if (adapter->generation == BE_GEN2) {
2669 if (be_physfn(adapter))
2674 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2675 pci_resource_len(adapter->pdev, db_reg));
2680 if (be_physfn(adapter)) {
2681 addr = ioremap_nocache(
2682 pci_resource_start(adapter->pdev, pcicfg_reg),
2683 pci_resource_len(adapter->pdev, pcicfg_reg));
2686 adapter->pcicfg = addr;
2688 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2692 be_unmap_pci_bars(adapter);
2697 static void be_ctrl_cleanup(struct be_adapter *adapter)
2699 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2701 be_unmap_pci_bars(adapter);
2704 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2707 mem = &adapter->mc_cmd_mem;
2709 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2713 static int be_ctrl_init(struct be_adapter *adapter)
2715 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2716 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2717 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2720 status = be_map_pci_bars(adapter);
2724 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2725 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2726 mbox_mem_alloc->size,
2727 &mbox_mem_alloc->dma,
2729 if (!mbox_mem_alloc->va) {
2731 goto unmap_pci_bars;
2734 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2735 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2736 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2737 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
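/*
 * The 16 extra bytes allocated for the mailbox let PTR_ALIGN() place
 * both the CPU and DMA views of the mailbox on a 16-byte boundary, the
 * alignment the mailbox doorbell assumes.
 */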
2739 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2740 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2741 mc_cmd_mem->size, &mc_cmd_mem->dma,
2743 if (mc_cmd_mem->va == NULL) {
2747 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2749 mutex_init(&adapter->mbox_lock);
2750 spin_lock_init(&adapter->mcc_lock);
2751 spin_lock_init(&adapter->mcc_cq_lock);
2753 init_completion(&adapter->flash_compl);
2754 pci_save_state(adapter->pdev);
2758 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2759 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2762 be_unmap_pci_bars(adapter);
2768 static void be_stats_cleanup(struct be_adapter *adapter)
2770 struct be_dma_mem *cmd = &adapter->stats_cmd;
2773 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2777 static int be_stats_init(struct be_adapter *adapter)
2779 struct be_dma_mem *cmd = &adapter->stats_cmd;
2781 cmd->size = sizeof(struct be_cmd_req_get_stats);
2782 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2784 if (cmd->va == NULL)
2786 memset(cmd->va, 0, cmd->size);
2790 static void __devexit be_remove(struct pci_dev *pdev)
2792 struct be_adapter *adapter = pci_get_drvdata(pdev);
2797 cancel_delayed_work_sync(&adapter->work);
2799 unregister_netdev(adapter->netdev);
2803 be_stats_cleanup(adapter);
2805 be_ctrl_cleanup(adapter);
2807 be_sriov_disable(adapter);
2809 be_msix_disable(adapter);
2811 pci_set_drvdata(pdev, NULL);
2812 pci_release_regions(pdev);
2813 pci_disable_device(pdev);
2815 free_netdev(adapter->netdev);
2818 static int be_get_config(struct be_adapter *adapter)
2823 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2827 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2828 &adapter->function_mode, &adapter->function_caps);
2832 memset(mac, 0, ETH_ALEN);
2834 if (be_physfn(adapter)) {
2835 status = be_cmd_mac_addr_query(adapter, mac,
2836 MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
2841 if (!is_valid_ether_addr(mac))
2842 return -EADDRNOTAVAIL;
2844 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2845 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2848 if (adapter->function_mode & 0x400)
2849 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2851 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
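/*
 * Assumption: function_mode bit 0x400 flags multi-channel (FLEX10)
 * operation, where the VLAN table is shared and only a quarter of the
 * entries are available to this function.
 */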
2853 status = be_cmd_get_cntl_attributes(adapter);
2857 be_cmd_check_native_mode(adapter);
2861 static int be_dev_family_check(struct be_adapter *adapter)
2863 struct pci_dev *pdev = adapter->pdev;
2864 u32 sli_intf = 0, if_type;
2866 switch (pdev->device) {
2869 adapter->generation = BE_GEN2;
2873 adapter->generation = BE_GEN3;
2876 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2877 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2878 SLI_INTF_IF_TYPE_SHIFT;
2880 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2882 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2886 dev_err(&pdev->dev, "VFs not supported\n");
2889 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2890 SLI_INTF_FAMILY_SHIFT);
2891 adapter->generation = BE_GEN3;
2894 adapter->generation = 0;
2899 static int lancer_wait_ready(struct be_adapter *adapter)
2901 #define SLIPORT_READY_TIMEOUT 500
2905 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2906 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2907 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2913 if (i == SLIPORT_READY_TIMEOUT)
2919 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2922 u32 sliport_status, err, reset_needed;
2923 status = lancer_wait_ready(adapter);
2925 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2926 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2927 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2928 if (err && reset_needed) {
2929 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2930 adapter->db + SLIPORT_CONTROL_OFFSET);
2932 /* check whether the adapter has corrected the error */
2933 status = lancer_wait_ready(adapter);
2934 sliport_status = ioread32(adapter->db +
2935 SLIPORT_STATUS_OFFSET);
2936 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2937 SLIPORT_STATUS_RN_MASK);
2938 if (status || sliport_status)
2940 } else if (err || reset_needed) {
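/*
 * ERR or RN asserted alone (without its partner bit) cannot be cleared
 * by the physical reset above and is treated as unrecoverable.
 */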
2947 static int __devinit be_probe(struct pci_dev *pdev,
2948 const struct pci_device_id *pdev_id)
2951 struct be_adapter *adapter;
2952 struct net_device *netdev;
2954 status = pci_enable_device(pdev);
2958 status = pci_request_regions(pdev, DRV_NAME);
2961 pci_set_master(pdev);
2963 netdev = alloc_etherdev(sizeof(struct be_adapter));
2964 if (netdev == NULL) {
2968 adapter = netdev_priv(netdev);
2969 adapter->pdev = pdev;
2970 pci_set_drvdata(pdev, adapter);
2972 status = be_dev_family_check(adapter);
2976 adapter->netdev = netdev;
2977 SET_NETDEV_DEV(netdev, &pdev->dev);
2979 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2981 netdev->features |= NETIF_F_HIGHDMA;
2983 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2985 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2990 be_sriov_enable(adapter);
2992 status = be_ctrl_init(adapter);
2996 if (lancer_chip(adapter)) {
2997 status = lancer_test_and_set_rdy_state(adapter);
2999 dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3004 /* sync up with fw's ready state */
3005 if (be_physfn(adapter)) {
3006 status = be_cmd_POST(adapter);
3011 /* tell fw we're ready to fire cmds */
3012 status = be_cmd_fw_init(adapter);
3016 status = be_cmd_reset_function(adapter);
3020 status = be_stats_init(adapter);
3024 status = be_get_config(adapter);
3028 be_msix_enable(adapter);
3030 INIT_DELAYED_WORK(&adapter->work, be_worker);
3032 status = be_setup(adapter);
3036 be_netdev_init(netdev);
3037 status = register_netdev(netdev);
3040 netif_carrier_off(netdev);
3042 if (be_physfn(adapter) && adapter->sriov_enabled) {
3043 status = be_vf_eth_addr_config(adapter);
3048 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3049 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3053 unregister_netdev(netdev);
3057 be_msix_disable(adapter);
3059 be_stats_cleanup(adapter);
3061 be_ctrl_cleanup(adapter);
3063 be_sriov_disable(adapter);
3064 free_netdev(netdev);
3065 pci_set_drvdata(pdev, NULL);
3067 pci_release_regions(pdev);
3069 pci_disable_device(pdev);
3071 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3075 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3077 struct be_adapter *adapter = pci_get_drvdata(pdev);
3078 struct net_device *netdev = adapter->netdev;
3080 cancel_delayed_work_sync(&adapter->work);
3082 be_setup_wol(adapter, true);
3084 netif_device_detach(netdev);
3085 if (netif_running(netdev)) {
3090 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3093 be_msix_disable(adapter);
3094 pci_save_state(pdev);
3095 pci_disable_device(pdev);
3096 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3100 static int be_resume(struct pci_dev *pdev)
3103 struct be_adapter *adapter = pci_get_drvdata(pdev);
3104 struct net_device *netdev = adapter->netdev;
3106 netif_device_detach(netdev);
3108 status = pci_enable_device(pdev);
3112 pci_set_power_state(pdev, PCI_D0);
3113 pci_restore_state(pdev);
3115 be_msix_enable(adapter);
3116 /* tell fw we're ready to fire cmds */
3117 status = be_cmd_fw_init(adapter);
3122 if (netif_running(netdev)) {
3127 netif_device_attach(netdev);
3130 be_setup_wol(adapter, false);
3132 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3137 * An FLR will stop BE from DMAing any data.
3139 static void be_shutdown(struct pci_dev *pdev)
3141 struct be_adapter *adapter = pci_get_drvdata(pdev);
3142 struct net_device *netdev = adapter->netdev;
3144 if (netif_running(netdev))
3145 cancel_delayed_work_sync(&adapter->work);
3147 netif_device_detach(netdev);
3149 be_cmd_reset_function(adapter);
3152 be_setup_wol(adapter, true);
3154 pci_disable_device(pdev);
3157 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3158 pci_channel_state_t state)
3160 struct be_adapter *adapter = pci_get_drvdata(pdev);
3161 struct net_device *netdev = adapter->netdev;
3163 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3165 adapter->eeh_err = true;
3167 netif_device_detach(netdev);
3169 if (netif_running(netdev)) {
3176 if (state == pci_channel_io_perm_failure)
3177 return PCI_ERS_RESULT_DISCONNECT;
3179 pci_disable_device(pdev);
3181 return PCI_ERS_RESULT_NEED_RESET;
3184 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3186 struct be_adapter *adapter = pci_get_drvdata(pdev);
3189 dev_info(&adapter->pdev->dev, "EEH reset\n");
3190 adapter->eeh_err = false;
3192 status = pci_enable_device(pdev);
3194 return PCI_ERS_RESULT_DISCONNECT;
3196 pci_set_master(pdev);
3197 pci_set_power_state(pdev, PCI_D0);
3198 pci_restore_state(pdev);
3200 /* Check if card is ok and fw is ready */
3201 status = be_cmd_POST(adapter);
3203 return PCI_ERS_RESULT_DISCONNECT;
3205 return PCI_ERS_RESULT_RECOVERED;
3208 static void be_eeh_resume(struct pci_dev *pdev)
3211 struct be_adapter *adapter = pci_get_drvdata(pdev);
3212 struct net_device *netdev = adapter->netdev;
3214 dev_info(&adapter->pdev->dev, "EEH resume\n");
3216 pci_save_state(pdev);
3218 /* tell fw we're ready to fire cmds */
3219 status = be_cmd_fw_init(adapter);
3223 status = be_setup(adapter);
3227 if (netif_running(netdev)) {
3228 status = be_open(netdev);
3232 netif_device_attach(netdev);
3235 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
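/*
 * EEH recovery flow: error_detected() detaches the netdev and disables
 * the device, slot_reset() re-enables it and re-runs POST, and
 * resume() re-runs be_cmd_fw_init() and be_setup() before re-attaching
 * the netdev.
 */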
3238 static struct pci_error_handlers be_eeh_handlers = {
3239 .error_detected = be_eeh_err_detected,
3240 .slot_reset = be_eeh_reset,
3241 .resume = be_eeh_resume,
3244 static struct pci_driver be_driver = {
3246 .id_table = be_dev_ids,
3248 .remove = be_remove,
3249 .suspend = be_suspend,
3250 .resume = be_resume,
3251 .shutdown = be_shutdown,
3252 .err_handler = &be_eeh_handlers
3255 static int __init be_init_module(void)
3257 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3258 rx_frag_size != 2048) {
3259 printk(KERN_WARNING DRV_NAME
3260 " : Module param rx_frag_size must be 2048/4096/8192."
3262 rx_frag_size = 2048;
3266 printk(KERN_WARNING DRV_NAME
3267 " : Module param num_vfs must not be greater than 32."
3272 return pci_register_driver(&be_driver);
3274 module_init(be_init_module);
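/*
 * Both module parameters validated above can be set at load time, e.g.
 * (assuming the module loads under the name be2net per DRV_NAME):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=8
 */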
3276 static void __exit be_exit_module(void)
3278 pci_unregister_driver(&be_driver);
3280 module_exit(be_exit_module);