/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

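/* Worked example (hypothetical values): re-arming CQ id 5 after popping 4
 * completions encodes as
 *	val = (5 & DB_CQ_RING_ID_MASK) | (1 << DB_CQ_REARM_SHIFT) |
 *	      (4 << DB_CQ_NUM_POPPED_SHIFT);
 * i.e. ring id, arm bit and popped-count are packed into one doorbell word.
 */
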
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

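/* Worked example (hypothetical values): with *acc == 0x0002FFF0 and a new
 * HW reading val == 0x0005, val < lo(*acc), so the 16-bit HW counter must
 * have wrapped; newacc = 0x00020005 + 65536 = 0x00030005. The high
 * half-word keeps the wrap count while the low half-word tracks the HW.
 */
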
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

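/* Worked example (hypothetical skb): linear data plus two frags needs
 * 1 + 2 data WRBs plus the header WRB = 4 entries; 4 is even, so no dummy
 * is added. With a single frag the total would be 3 (odd), so on
 * non-Lancer chips a dummy WRB pads the count to 4.
 */
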
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

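/* Example split (hypothetical address): addr = 0x100001234ULL is stored as
 * frag_pa_hi = 0x1 and frag_pa_lo = 0x00001234, while frag_len is
 * truncated to the width of ETH_WRB_FRAG_LEN_MASK.
 */
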
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag = 0;
	u8 vlan_prio;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

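/* Example of the priority rewrite above (hypothetical values): for a tag
 * of 0x4005 (prio 2, vid 5) with vlan_prio_bmap == 0x81 (only prios 0 and
 * 7 allowed), (1 << 2) is not in the bitmap, so the tag is rebuilt with
 * adapter->recommended_prio in the PCP bits while vid 5 is preserved.
 */
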
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

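/* Worked example (hypothetical rate): at ~440K pkts/s, eqd = 440000/110000
 * = 4, which is then clamped to [rx_eq->min_eqd, rx_eq->max_eqd]. The new
 * delay is written to HW only when it differs from cur_eqd, so a steady
 * packet rate causes no extra mailbox traffic.
 */
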
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

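/* Truth sketch: a TCP completion with both l4_csum and ip_csum set passes;
 * a non TCP/UDP completion (tcpf == udpf == 0) never passes; and an IPv6
 * packet needs only the L4 check, since ipcksm is meaningless for v6.
 */
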
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

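/* Sizing example: with the default rx_frag_size of 2048 and 4K pages,
 * big_page_size is (1 << get_order(2048)) * PAGE_SIZE = 4096, so each page
 * is carved into two frags and last_page_user is set on every second
 * descriptor posted.
 */
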
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	     !adapter->sriov_enabled && be_physfn(adapter) &&
	     !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
}

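/* Fallback sketch (hypothetical counts): if 9 vectors are requested (one
 * per RX EQ plus one for the shared TX/MCC EQ) but the platform can grant
 * only 5, pci_enable_msix() returns 5; since 5 >= BE_MIN_MSIX_VECTORS the
 * request is retried with num_vec = 5 instead of falling back to INTx.
 */
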
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
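/* Teardown mirrors be_open() in reverse: stop async MCC processing and
 * chip interrupts first, quiesce NAPI, sync and free the IRQs, drain
 * pending TX completions, and finally flush the RX rings.
 */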
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
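/* Ring 0 is the default (non-RSS) queue; the remaining rings are created
 * RSS-capable and their rss_ids are collected into the indirection table
 * programmed via be_cmd_rss_config().
 */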
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
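/* Bring-up order: RX rings and NAPI come up before IRQs are registered,
 * the event/completion queues are armed next, and async MCC processing
 * starts only once interrupts are live.
 */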
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
				adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	adapter->be3_native = false;
	adapter->promiscuous = false;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
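/* Create an interface for each VF. On BE2/BE3 the PF also programs the
 * VF MAC addresses (Lancer VFs get theirs from the FW); the queried link
 * speed seeds each VF's default TX rate.
 */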
static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
	}

	if (!lancer_chip(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
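/* Function-level bring-up: create the TX/RX/MCC queues, create the
 * interface with the permanent MAC, sync VLAN/RX-mode/flow-control state
 * with the FW, and set up the VFs if SR-IOV was enabled. Any failure
 * unwinds through be_clear().
 */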
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;
	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;
	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	/* For BEx, the VF's permanent mac queried from card is incorrect.
	 * Query the mac configured by the PF using if_handle
	 */
	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		goto err;
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		if (status)
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}
	return 0;
err:
	be_clear(adapter);
	return status;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}
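/* Walk the per-generation flash layout table and write each applicable
 * UFI component to its flash region in 32KB chunks; intermediate chunks
 * use a SAVE op and the final chunk issues the actual FLASH op.
 */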
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;

	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
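/* Lancer FW images are written to the "/prg" object in 32KB chunks via
 * write_object commands; a final zero-length write commits the image.
 * The image length must be a multiple of 4 bytes.
 */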
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
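/* For BE2/BE3 the UFI generation recorded in the flash file header must
 * match the adapter generation; gen3 UFI files may carry several images,
 * of which only the one with image-id 1 is flashed here.
 */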
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}
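/* The mailbox shared with the FW must be 16-byte aligned, so allocate
 * sizeof(mailbox) + 16 and derive an aligned va/dma pair with
 * PTR_ALIGN() rather than relying on the allocator's alignment.
 */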
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);
unmap_pci_bars:
	be_unmap_pci_bars(adapter);
done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
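/* On an error with the reset-needed (RN) bit set, trigger an
 * initiate-port reset through SLIPORT_CONTROL and re-poll for ready;
 * an error without RN, or a failed recovery, is treated as fatal.
 */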
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
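/* Probe order matters: PCI resources and the mailbox (be_ctrl_init) must
 * exist before any FW command; the FW is POSTed and init'ed before the
 * function reset and the stats/config queries; and MSI-X is sized only
 * after be_get_config(), just ahead of be_setup().
 */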
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);