/*
 * Copyright (C) 2005 - 2010 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * 209 N. Fair Oaks Ave
 */

#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
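/*
 * Example usage (hypothetical values): the parameters above can be set at
 * module load time, e.g. "modprobe be2net rx_frag_size=4096 num_vfs=2".
 * rx_frag_size is the size of the fragments posted as receive buffers, and
 * num_vfs is honoured only when the driver runs on a physical function.
 */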
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
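/*
 * The EQ doorbell above and the CQ doorbell below follow the same pattern:
 * the queue id goes in the ring-id field (with an extension field for ids
 * that do not fit), the "arm" bit re-enables interrupts on the queue, and
 * num_popped credits the consumed entries back to the hardware so the ring
 * slots can be reused.
 */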
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here.
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;

	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
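/*
 * Adaptive interrupt coalescing in a nutshell: the sampled frags/sec rate is
 * mapped to an EQ delay and clamped to [min_eqd, max_eqd]. For example, at
 * 1,100,000 rx frags/sec the divisor of 110000 above yields an eqd of 10; a
 * busier ring gets a larger delay (fewer interrupts per second) while an
 * idle one gets a smaller delay (lower latency).
 */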
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
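/*
 * Worked example for be_calc_rate(): 500,000,000 bytes moved over 2 seconds
 * (ticks = 2 * HZ) gives 250,000,000 bytes/sec, << 3 = 2,000,000,000
 * bits/sec, / 1,000,000 = 2000 Mbits/sec.
 */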
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
static void be_tx_stats_update(struct be_adapter *adapter,
		u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);

	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
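/*
 * Example: a linear skb (no frags) needs 1 data WRB + 1 header WRB = 2,
 * already even, so no dummy is added; an skb with linear data and 2 frags
 * needs 3 + 1 = 4. On non-Lancer chips an odd total gets one dummy WRB so
 * the count posted to the hardware stays even.
 */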
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
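/*
 * As used above, the two be_cmd_vlan_config() paths differ only in their
 * table arguments: an exact filter is programmed from a table of up to 64
 * tags, while a NULL table with the final (promiscuous) flag set tells the
 * hardware to accept every VLAN once the table would overflow.
 */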
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}
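/*
 * IPv6 headers carry no checksum of their own, so the hardware's IP-checksum
 * verdict is meaningful only for IPv4; for IPv6 frames the L4 checksum
 * result alone decides whether the packet is clean.
 */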
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
		u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
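/*
 * Fragment coalescing example: with rx_frag_size = 2048 and a 4 KB backing
 * page, two consecutive 2048-byte frags from the same page land in one skb
 * frag slot; the second one only grows frags[j].size, and its extra page
 * reference is dropped with put_page().
 */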
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);

	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}
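/*
 * __GFP_COMP makes a multi-page allocation a compound page, so the per-frag
 * get_page()/put_page() refcounting done on the receive path operates on
 * the page head and stays balanced no matter which fragment of the big page
 * a completion points at.
 */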
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
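/*
 * Example: with the default rx_frag_size of 2048 and 4 KB pages,
 * big_page_size works out to 4096, so each allocated page is split into two
 * posted fragments; only the fragment marked last_page_user triggers the
 * pci_unmap_page() once the page's final fragment is consumed.
 */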
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
								rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
								rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
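/*
 * Note on the fallback above: on these kernels pci_enable_msix() returns a
 * positive count when the request cannot be met in full, indicating how many
 * vectors are actually available. The retry shrinks the request to that
 * count and sizes num_rx_qs to whatever remains after reserving one vector
 * for TX/MCC.
 */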
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
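/*
 * Sketch of the scheme described above (be_vf_eth_addr_generate() itself is
 * defined elsewhere in the driver): the PF's MAC is hashed with jhash to
 * derive a seed address, and each VF then receives seed + vf_index by
 * bumping the last octet, as the mac[5] += 1 in the loop above shows.
 */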
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
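/* Entry point for ethtool -f. Fetches the UFI file via request_firmware(),
 * checks that it matches the chip generation, and hands it to
 * be_flash_data(); a GEN3 UFI can carry multiple images, each described by
 * an image_hdr following the file header.
 */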
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status, i = 0, num_imgs = 0;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
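/* One-time netdev setup: advertise the offload features (checksum, TSO,
 * GRO, VLAN acceleration), pick default flow-control settings, hook up the
 * netdev and ethtool ops, and register one NAPI context per Rx queue plus
 * one that services the Tx and MCC queues.
 */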
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
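/* Map the BARs this function needs. Lancer exposes only a doorbell BAR
 * (BAR 0). On BE2/BE3 the PF maps the CSR BAR (BAR 2), a doorbell BAR and
 * a pcicfg BAR; which BAR numbers carry the last two depends on the chip
 * generation. A VF has no pcicfg BAR of its own and reaches that space at
 * a fixed offset inside its doorbell BAR.
 */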
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
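/* The mailbox used for bootstrap MCC commands is kept 16-byte aligned:
 * 16 extra bytes are allocated and the aligned va/dma are derived from the
 * raw allocation with PTR_ALIGN().
 */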
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	unregister_netdev(adapter->netdev);

	be_clear(adapter);
	be_stats_cleanup(adapter);
	be_ctrl_cleanup(adapter);
	be_sriov_disable(adapter);
	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* 0x400 in function_mode presumably flags multi-channel mode;
	 * only a quarter of the VLAN filters are usable then */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
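/* Derive the chip generation from the PCI device id. The Lancer id is
 * additionally validated via the SLI_INTF register, which also yields the
 * SLI family; VFs are not supported on that interface type here.
 */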
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			(if_type != 0x02)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
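/* PM suspend/resume. Suspend closes the interface, arms WoL if it is
 * configured, and tears the function down with be_clear(); resume
 * re-enables the device, re-inits the fw command path and rebuilds
 * everything with be_setup().
 */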
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
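/* PCI error recovery (EEH) callbacks. error_detected() quiesces the
 * function and reports whether a slot reset is worth trying; slot_reset()
 * re-enables the device and POSTs the fw; resume() re-inits the fw command
 * path and brings the interface back up.
 */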
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);