/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

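/* rx_frag_size selects the size of the buffer fragments carved out of
 * compound pages and posted to the RX ring; be_init_module() below falls
 * back to 2048 for any value other than 2048/4096/8192.
 * Example (hypothetical host): loading with a non-default fragment size:
 *	modprobe be2net rx_frag_size=4096
 */
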
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

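/* The helpers below poke the adapter's host-interrupt enable bit and its
 * per-queue doorbells: each doorbell write encodes the ring id in the low
 * bits plus a count of entries posted (RQ/TXQ) or consumed (EQ/CQ), and
 * the EQ/CQ doorbells can optionally re-arm the interrupt.
 */
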
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

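/* A MAC address change is a delete of the currently programmed pmac
 * followed by an add of the new one; the netdev copy is updated only when
 * both firmware commands succeed.
 */
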
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

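/* Example of the adaptive coalescing arithmetic in be_rx_eqd_update()
 * below: at ~880000 rx frags/sec, eqd = (880000 / 110000) << 3 = 64; the
 * value is then clamped to [min_eqd, max_eqd], zeroed when under 10, and
 * written via be_cmd_modify_eqd() only when it actually changes.
 */
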
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

270 static struct net_device_stats *be_get_stats(struct net_device *dev)
272 struct be_adapter *adapter = netdev_priv(dev);
274 return &adapter->stats.net_stats;
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;

	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

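/* A transmit is queued as a chain of WRBs: one header WRB built by
 * wrb_fill_hdr(), one fragment WRB for the skb linear data (if any), one
 * per page fragment, and possibly one zero-length dummy WRB so the chain
 * has an even number of entries (see wrb_cnt_for_skb()).
 */
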
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(adapter, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(adapter, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

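/* The EQs are disarmed around the vlan_grp update below, presumably so
 * the event path does not pick up the group pointer mid-change.
 */
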
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	if (netdev->flags & IFF_ALLMULTI) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev->mc_count);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

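/* RX fragments share compound pages; the PCI mapping is torn down only
 * when the last fragment carved from a page (last_page_user) is consumed.
 */
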
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

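/* RX frames are assembled by copying at most BE_HDR_LEN bytes of the
 * first fragment into the skb linear area and attaching the remainder of
 * the received fragments as page frags, coalescing fragments that live in
 * the same physical page into a single frag slot.
 */
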
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;

	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		goto done;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			goto done;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

done:
	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

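/* EQ entries carry a single 32-bit evt word; a zero word marks an unused
 * entry, so consumers clear evt after handling each valid entry.
 */
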
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues
 */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

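/* INTx handler: the per-function CEV ISR register is sampled to decide
 * whether this shared interrupt is ours; both EQs are then serviced.
 */
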
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			be_pci_func(adapter) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

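/* be_worker runs once a second: it refreshes the firmware stats snapshot,
 * re-tunes the RX EQ delay, updates the tx/rx rate estimates, and
 * replenishes the RX ring if a previous post failed for lack of memory.
 */
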
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Get Stats */
	status = be_cmd_get_stats(adapter, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
	return;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up);
	if (status)
		return status;
	be_link_status_update(adapter, link_up);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(adapter, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	return 0;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

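/* The mailbox used for firmware commands must be 16-byte aligned, so 16
 * extra bytes are allocated and the working va/dma pointers are rounded
 * up with PTR_ALIGN.
 */
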
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -1;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	return 0;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_hw_up(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_POST(adapter);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);