2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
22 #include <asm/div64.h>
23 #include <linux/aer.h>
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
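/* Usage sketch: both parameters can only be set at module load time,
 * e.g. "modprobe be2net num_vfs=4 rx_frag_size=4096"; with S_IRUGO they
 * are exposed read-only under /sys/module/be2net/parameters/ afterwards.
 */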
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
122 /* Is BE in multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131 struct be_dma_mem *mem = &q->dma_mem;
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
142 struct be_dma_mem *mem = &q->dma_mem;
144 memset(q, 0, sizeof(*q));
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO);
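/* Sizing sketch, assuming a 2048-entry TX ring of 16-byte WRBs:
 * mem->size = 2048 * 16 = 32 KiB, obtained in one dma_alloc_coherent()
 * call and pre-zeroed by __GFP_ZERO.
 */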
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
178 /* On Lancer, interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
182 if (adapter->eeh_error)
185 status = be_cmd_intr_set(adapter, enable);
187 be_reg_intr_set(adapter, enable);
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
208 iowrite32(val, adapter->db + txo->db_offset);
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212 bool arm, bool clear_int, u16 num_popped)
215 val |= qid & DB_EQ_RING_ID_MASK;
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
219 if (adapter->eeh_error)
223 val |= 1 << DB_EQ_REARM_SHIFT;
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
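/* Worked example: re-arming EQ id 5 after popping 3 events issues a
 * single 32-bit doorbell write of
 *   (5 & DB_EQ_RING_ID_MASK) | (1 << DB_EQ_REARM_SHIFT) |
 *   (1 << DB_EQ_EVNT_SHIFT) | (3 << DB_EQ_NUM_POPPED_SHIFT),
 * with the clear-interrupt bit added only when clear_int is true.
 */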
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
234 val |= qid & DB_CQ_RING_ID_MASK;
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
238 if (adapter->eeh_error)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
252 u8 current_mac[ETH_ALEN];
253 u32 pmac_id = adapter->pmac_id[0];
254 bool active_mac = true;
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
259 /* For a BE VF, the MAC address is already activated by the PF.
260 * Hence the only operation left is updating netdev->dev_addr.
261 * Update it only if the user passes the same MAC that was used
262 * while configuring the VF MAC from the PF (hypervisor).
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
276 /* For Lancer, check if any MAC is active.
277 * If active, get its mac id.
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
285 &adapter->pmac_id[0], 0);
291 be_cmd_pmac_del(adapter, adapter->if_handle,
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
297 dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307 return &cmd->hw_stats;
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311 return &cmd->hw_stats;
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321 return &hw_stats->erx;
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325 return &hw_stats->erx;
329 static void populate_be_v0_stats(struct be_adapter *adapter)
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334 struct be_port_rxf_stats_v0 *port_stats =
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
365 if (adapter->port_num)
366 drvs->jabber_events = rxf_stats->port1_jabber_events;
368 drvs->jabber_events = rxf_stats->port0_jabber_events;
369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
378 static void populate_be_v1_stats(struct be_adapter *adapter)
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383 struct be_port_rxf_stats_v1 *port_stats =
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
424 static void populate_lancer_stats(struct be_adapter *adapter)
427 struct be_drv_stats *drvs = &adapter->drv_stats;
428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456 drvs->jabber_events = pport_stats->rx_jabbers;
457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459 drvs->rx_drops_too_many_frags =
460 pport_stats->rx_drops_too_many_frags_lo;
463 static void accumulate_16bit_val(u32 *acc, u16 val)
465 #define lo(x) (x & 0xFFFF)
466 #define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
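/* Worked example, hypothetical values: if *acc is 0x0001fff0
 * (hi 0x00010000, lo 0xfff0) and the HW now reports val = 0x0005, then
 * val < lo(*acc), i.e. the 16-bit HW counter wrapped; newacc becomes
 * 0x00010000 + 0x0005 + 65536 = 0x00020005, so the 32-bit total
 * survives the wrap.
 */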
475 void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 /* The erx HW counter below can actually wrap around after
483 * 65535; the driver accumulates it into a 32-bit value.
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
489 void be_parse_stats(struct be_adapter *adapter)
491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
516 struct be_adapter *adapter = netdev_priv(netdev);
517 struct be_drv_stats *drvs = &adapter->drv_stats;
518 struct be_rx_obj *rxo;
519 struct be_tx_obj *txo;
524 for_all_rx_queues(adapter, rxo, i) {
525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
538 for_all_tx_queues(adapter, txo, i) {
539 const struct be_tx_stats *tx_stats = tx_stats(txo);
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
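/* The u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pairs above
 * are seqcount read loops: if the datapath updates the counters while we
 * snapshot them, retry returns true and the snapshot is re-taken, giving
 * tear-free 64-bit reads even on 32-bit hosts.
 */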
549 /* bad pkts received */
550 stats->rx_errors = drvs->rx_crc_errors +
551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
559 drvs->rx_dropped_runt;
561 /* detailed rx errors */
562 stats->rx_length_errors = drvs->rx_in_range_errors +
563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
566 stats->rx_crc_errors = drvs->rx_crc_errors;
568 /* frame alignment errors */
569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
571 /* receiver fifo overrun */
572 /* drops_no_pbuf is not per i/f, it's per BE card */
573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
581 struct net_device *netdev = adapter->netdev;
583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584 netif_carrier_off(netdev);
585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
591 netif_carrier_off(netdev);
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
597 struct be_tx_stats *stats = tx_stats(txo);
599 u64_stats_update_begin(&stats->sync);
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
606 u64_stats_update_end(&stats->sync);
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
613 int cnt = (skb->len > skb->data_len);
615 cnt += skb_shinfo(skb)->nr_frags;
617 /* to account for hdr wrb */
619 if (lancer_chip(adapter) || !(cnt & 1)) {
622 /* add a dummy to make it an even num */
626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
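/* Worked example: a linear-only skb needs 1 data WRB + 1 hdr WRB = 2
 * (even, no dummy). An skb with one frag needs 1 + 1 + 1 = 3; on
 * non-Lancer chips a dummy WRB pads that to 4, keeping the count even.
 */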
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
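/* Example: if the stack requests PCP 5 but adapter->vlan_prio_bmap only
 * has bits 0 and 1 set, the priority field of the tag is rewritten to
 * adapter->recommended_prio while the VID bits are left untouched.
 */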
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
659 memset(hdr, 0, sizeof(*hdr));
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
663 if (skb_is_gso(skb)) {
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
676 if (vlan_tx_tag_present(skb)) {
677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
699 dma_unmap_single(dev, dma, wrb->frag_len,
702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
712 struct device *dev = &adapter->pdev->dev;
713 struct sk_buff *first_skb = skb;
714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
716 bool map_single = false;
719 hdr = queue_head_node(txq);
721 map_head = txq->head;
723 if (skb->len > skb->data_len) {
724 int len = skb_headlen(skb);
725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737 const struct skb_frag_struct *frag =
738 &skb_shinfo(skb)->frags[i];
739 busaddr = skb_frag_dma_map(dev, frag, 0,
740 skb_frag_size(frag), DMA_TO_DEVICE);
741 if (dma_mapping_error(dev, busaddr))
743 wrb = queue_head_node(txq);
744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
747 copied += skb_frag_size(frag);
751 wrb = queue_head_node(txq);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
762 txq->head = map_head;
764 wrb = queue_head_node(txq);
765 unmap_tx_frag(dev, wrb, map_single);
767 copied -= wrb->frag_len;
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
779 skb = skb_share_check(skb, GFP_ATOMIC);
783 if (vlan_tx_tag_present(skb))
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
786 vlan_tag = adapter->pvid;
789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
794 *skip_hw_vlan = true;
797 /* Insert the outer VLAN, if any */
798 if (adapter->qnq_vid) {
799 vlan_tag = adapter->qnq_vid;
800 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
804 *skip_hw_vlan = true;
810 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
832 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
837 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
839 return BE3_chip(adapter) &&
840 be_ipv6_exthdr_check(skb);
843 static netdev_tx_t be_xmit(struct sk_buff *skb,
844 struct net_device *netdev)
846 struct be_adapter *adapter = netdev_priv(netdev);
847 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
848 struct be_queue_info *txq = &txo->q;
849 struct iphdr *ip = NULL;
850 u32 wrb_cnt = 0, copied = 0;
851 u32 start = txq->head, eth_hdr_len;
852 bool dummy_wrb, stopped = false;
853 bool skip_hw_vlan = false;
854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
856 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
857 VLAN_ETH_HLEN : ETH_HLEN;
859 /* For padded packets, BE HW modifies the tot_len field in the IP
860 * header incorrectly when the VLAN tag is inserted by HW.
862 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
863 ip = (struct iphdr *)ip_hdr(skb);
864 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
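/* Worked example, assuming an untagged 14-byte Ethernet header: a
 * 28-byte IPv4 packet arrives here as a 60-byte padded frame;
 * pskb_trim() to eth_hdr_len + tot_len (14 + 28 = 42) strips the pad
 * bytes before HW VLAN insertion can corrupt tot_len.
 */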
867 /* If vlan tag is already inlined in the packet, skip HW VLAN
868 * tagging in UMC mode
870 if ((adapter->function_mode & UMC_ENABLED) &&
871 veh->h_vlan_proto == htons(ETH_P_8021Q))
874 /* HW has a bug wherein it will calculate CSUM for VLAN
875 * pkts even though it is disabled.
876 * Manually insert VLAN in pkt.
878 if (skb->ip_summed != CHECKSUM_PARTIAL &&
879 vlan_tx_tag_present(skb)) {
880 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
885 /* HW may lockup when VLAN HW tagging is requested on
886 * certain ipv6 packets. Drop such pkts if the HW workaround to
887 * skip HW tagging is not enabled by FW.
889 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
890 (adapter->pvid || adapter->qnq_vid) &&
891 !qnq_async_evt_rcvd(adapter)))
894 /* Manual VLAN tag insertion to prevent:
895 * ASIC lockup when the ASIC inserts VLAN tag into
896 * certain ipv6 packets. Insert VLAN tags in driver,
897 * and set event, completion, vlan bits accordingly
900 if (be_ipv6_tx_stall_chk(adapter, skb) &&
901 be_vlan_tag_tx_chk(adapter, skb)) {
902 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
907 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
909 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
912 int gso_segs = skb_shinfo(skb)->gso_segs;
914 /* record the sent skb in the sent_skb table */
915 BUG_ON(txo->sent_skb_list[start]);
916 txo->sent_skb_list[start] = skb;
918 /* Ensure txq has space for the next skb; else stop the queue
919 * *BEFORE* ringing the tx doorbell, so that we serialize the
920 * tx compls of the current transmit, which will wake up the queue
922 atomic_add(wrb_cnt, &txq->used);
923 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
925 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
929 be_txq_notify(adapter, txo, wrb_cnt);
931 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
934 dev_kfree_skb_any(skb);
940 static int be_change_mtu(struct net_device *netdev, int new_mtu)
942 struct be_adapter *adapter = netdev_priv(netdev);
943 if (new_mtu < BE_MIN_MTU ||
944 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
945 (ETH_HLEN + ETH_FCS_LEN))) {
946 dev_info(&adapter->pdev->dev,
947 "MTU must be between %d and %d bytes\n",
949 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
952 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
953 netdev->mtu, new_mtu);
954 netdev->mtu = new_mtu;
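/* Worked example, assuming BE_MAX_JUMBO_FRAME_SIZE is 9018: the largest
 * accepted MTU is 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - 18 = 9000.
 */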
959 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
960 * If the user configures more, place BE in vlan promiscuous mode.
962 static int be_vid_config(struct be_adapter *adapter)
964 u16 vids[BE_NUM_VLANS_SUPPORTED];
968 /* No need to further configure vids if in promiscuous mode */
969 if (adapter->promiscuous)
972 if (adapter->vlans_added > adapter->max_vlans)
973 goto set_vlan_promisc;
975 /* Construct VLAN Table to give to HW */
976 for (i = 0; i < VLAN_N_VID; i++)
977 if (adapter->vlan_tag[i])
978 vids[num++] = cpu_to_le16(i);
980 status = be_cmd_vlan_config(adapter, adapter->if_handle,
983 /* Set to VLAN promisc mode as setting VLAN filter failed */
985 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
986 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
987 goto set_vlan_promisc;
993 status = be_cmd_vlan_config(adapter, adapter->if_handle,
998 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1000 struct be_adapter *adapter = netdev_priv(netdev);
1003 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1008 /* Packets with VID 0 are always received by Lancer by default */
1009 if (lancer_chip(adapter) && vid == 0)
1012 adapter->vlan_tag[vid] = 1;
1013 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1014 status = be_vid_config(adapter);
1017 adapter->vlans_added++;
1019 adapter->vlan_tag[vid] = 0;
1024 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1026 struct be_adapter *adapter = netdev_priv(netdev);
1029 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1034 /* Packets with VID 0 are always received by Lancer by default */
1035 if (lancer_chip(adapter) && vid == 0)
1038 adapter->vlan_tag[vid] = 0;
1039 if (adapter->vlans_added <= adapter->max_vlans)
1040 status = be_vid_config(adapter);
1043 adapter->vlans_added--;
1045 adapter->vlan_tag[vid] = 1;
1050 static void be_set_rx_mode(struct net_device *netdev)
1052 struct be_adapter *adapter = netdev_priv(netdev);
1055 if (netdev->flags & IFF_PROMISC) {
1056 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1057 adapter->promiscuous = true;
1061 /* BE was previously in promiscuous mode; disable it */
1062 if (adapter->promiscuous) {
1063 adapter->promiscuous = false;
1064 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1066 if (adapter->vlans_added)
1067 be_vid_config(adapter);
1070 /* Enable multicast promisc if num configured exceeds what we support */
1071 if (netdev->flags & IFF_ALLMULTI ||
1072 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1073 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1077 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1078 struct netdev_hw_addr *ha;
1079 int i = 1; /* First slot is claimed by the Primary MAC */
1081 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1082 be_cmd_pmac_del(adapter, adapter->if_handle,
1083 adapter->pmac_id[i], 0);
1086 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1087 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1088 adapter->promiscuous = true;
1092 netdev_for_each_uc_addr(ha, adapter->netdev) {
1093 adapter->uc_macs++; /* First slot is for Primary MAC */
1094 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1096 &adapter->pmac_id[adapter->uc_macs], 0);
1100 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1102 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1104 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1105 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1106 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1112 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1114 struct be_adapter *adapter = netdev_priv(netdev);
1115 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1117 bool active_mac = false;
1119 u8 old_mac[ETH_ALEN];
1121 if (!sriov_enabled(adapter))
1124 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1127 if (lancer_chip(adapter)) {
1128 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1130 if (!status && active_mac)
1131 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1134 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1136 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1137 vf_cfg->pmac_id, vf + 1);
1139 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1140 &vf_cfg->pmac_id, vf + 1);
1144 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1147 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1152 static int be_get_vf_config(struct net_device *netdev, int vf,
1153 struct ifla_vf_info *vi)
1155 struct be_adapter *adapter = netdev_priv(netdev);
1156 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1158 if (!sriov_enabled(adapter))
1161 if (vf >= adapter->num_vfs)
1165 vi->tx_rate = vf_cfg->tx_rate;
1166 vi->vlan = vf_cfg->vlan_tag;
1168 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1173 static int be_set_vf_vlan(struct net_device *netdev,
1174 int vf, u16 vlan, u8 qos)
1176 struct be_adapter *adapter = netdev_priv(netdev);
1179 if (!sriov_enabled(adapter))
1182 if (vf >= adapter->num_vfs || vlan > 4095)
1186 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1187 /* If this is new value, program it. Else skip. */
1188 adapter->vf_cfg[vf].vlan_tag = vlan;
1190 status = be_cmd_set_hsw_config(adapter, vlan,
1191 vf + 1, adapter->vf_cfg[vf].if_handle);
1194 /* Reset Transparent Vlan Tagging. */
1195 adapter->vf_cfg[vf].vlan_tag = 0;
1196 vlan = adapter->vf_cfg[vf].def_vid;
1197 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1198 adapter->vf_cfg[vf].if_handle);
1203 dev_info(&adapter->pdev->dev,
1204 "VLAN %d config on VF %d failed\n", vlan, vf);
1208 static int be_set_vf_tx_rate(struct net_device *netdev,
1211 struct be_adapter *adapter = netdev_priv(netdev);
1214 if (!sriov_enabled(adapter))
1217 if (vf >= adapter->num_vfs)
1220 if (rate < 100 || rate > 10000) {
1221 dev_err(&adapter->pdev->dev,
1222 "tx rate must be between 100 and 10000 Mbps\n");
1226 if (lancer_chip(adapter))
1227 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1229 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1232 dev_err(&adapter->pdev->dev,
1233 "tx rate %d on VF %d failed\n", rate, vf);
1235 adapter->vf_cfg[vf].tx_rate = rate;
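/* Judging by the rate / 10 scaling above, both FW cmds appear to take
 * the rate in units of 10 Mbps; a requested 10000 Mbps cap is thus
 * programmed as 1000.
 */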
1239 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1241 struct pci_dev *dev, *pdev = adapter->pdev;
1242 int vfs = 0, assigned_vfs = 0, pos;
1245 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1248 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1249 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1251 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1253 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1255 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1258 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1260 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1263 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1265 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1266 ulong now = jiffies;
1267 ulong delta = now - stats->rx_jiffies;
1269 unsigned int start, eqd;
1271 if (!eqo->enable_aic) {
1276 if (eqo->idx >= adapter->num_rx_qs)
1279 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1281 /* Wrapped around */
1282 if (time_before(now, stats->rx_jiffies)) {
1283 stats->rx_jiffies = now;
1287 /* Update once a second */
1292 start = u64_stats_fetch_begin_bh(&stats->sync);
1293 pkts = stats->rx_pkts;
1294 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1296 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1297 stats->rx_pkts_prev = pkts;
1298 stats->rx_jiffies = now;
1299 eqd = (stats->rx_pps / 110000) << 3;
1300 eqd = min(eqd, eqo->max_eqd);
1301 eqd = max(eqd, eqo->min_eqd);
1306 if (eqd != eqo->cur_eqd) {
1307 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
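/* Worked example: at 440,000 pkts/s the heuristic yields
 * eqd = (440000 / 110000) << 3 = 32, clamped to
 * [eqo->min_eqd, eqo->max_eqd] before be_cmd_modify_eqd() programs it.
 */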
1312 static void be_rx_stats_update(struct be_rx_obj *rxo,
1313 struct be_rx_compl_info *rxcp)
1315 struct be_rx_stats *stats = rx_stats(rxo);
1317 u64_stats_update_begin(&stats->sync);
1319 stats->rx_bytes += rxcp->pkt_size;
1321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1322 stats->rx_mcast_pkts++;
1324 stats->rx_compl_err++;
1325 u64_stats_update_end(&stats->sync);
1328 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1330 /* L4 checksum is not reliable for non-TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts. */
1332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
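/* Examples: a TCP/IPv4 completion with good L4 and IP checksums passes;
 * for IPv6 the rxcp->ipv6 term makes the IP-checksum bit irrelevant
 * (IPv6 has no header checksum); non-TCP/UDP frames always fail here
 * and fall back to software checksum validation.
 */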
1336 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1339 struct be_adapter *adapter = rxo->adapter;
1340 struct be_rx_page_info *rx_page_info;
1341 struct be_queue_info *rxq = &rxo->q;
1343 rx_page_info = &rxo->page_info_tbl[frag_idx];
1344 BUG_ON(!rx_page_info->page);
1346 if (rx_page_info->last_page_user) {
1347 dma_unmap_page(&adapter->pdev->dev,
1348 dma_unmap_addr(rx_page_info, bus),
1349 adapter->big_page_size, DMA_FROM_DEVICE);
1350 rx_page_info->last_page_user = false;
1353 atomic_dec(&rxq->used);
1354 return rx_page_info;
1357 /* Throw away the data in the Rx completion */
1358 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
1361 struct be_queue_info *rxq = &rxo->q;
1362 struct be_rx_page_info *page_info;
1363 u16 i, num_rcvd = rxcp->num_rcvd;
1365 for (i = 0; i < num_rcvd; i++) {
1366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
1369 index_inc(&rxcp->rxq_idx, rxq->len);
1374 * skb_fill_rx_data forms a complete skb for an ether frame
1375 * indicated by rxcp.
1377 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378 struct be_rx_compl_info *rxcp)
1380 struct be_queue_info *rxq = &rxo->q;
1381 struct be_rx_page_info *page_info;
1383 u16 hdr_len, curr_frag_len, remaining;
1386 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1387 start = page_address(page_info->page) + page_info->page_offset;
1390 /* Copy data in the first descriptor of this completion */
1391 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1393 skb->len = curr_frag_len;
1394 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1395 memcpy(skb->data, start, curr_frag_len);
1396 /* Complete packet has now been moved to data */
1397 put_page(page_info->page);
1399 skb->tail += curr_frag_len;
1402 memcpy(skb->data, start, hdr_len);
1403 skb_shinfo(skb)->nr_frags = 1;
1404 skb_frag_set_page(skb, 0, page_info->page);
1405 skb_shinfo(skb)->frags[0].page_offset =
1406 page_info->page_offset + hdr_len;
1407 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1408 skb->data_len = curr_frag_len - hdr_len;
1409 skb->truesize += rx_frag_size;
1410 skb->tail += hdr_len;
1412 page_info->page = NULL;
1414 if (rxcp->pkt_size <= rx_frag_size) {
1415 BUG_ON(rxcp->num_rcvd != 1);
1419 /* More frags present for this completion */
1420 index_inc(&rxcp->rxq_idx, rxq->len);
1421 remaining = rxcp->pkt_size - curr_frag_len;
1422 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1423 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1424 curr_frag_len = min(remaining, rx_frag_size);
1426 /* Coalesce all frags from the same physical page in one slot */
1427 if (page_info->page_offset == 0) {
1430 skb_frag_set_page(skb, j, page_info->page);
1431 skb_shinfo(skb)->frags[j].page_offset =
1432 page_info->page_offset;
1433 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1434 skb_shinfo(skb)->nr_frags++;
1436 put_page(page_info->page);
1439 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1440 skb->len += curr_frag_len;
1441 skb->data_len += curr_frag_len;
1442 skb->truesize += rx_frag_size;
1443 remaining -= curr_frag_len;
1444 index_inc(&rxcp->rxq_idx, rxq->len);
1445 page_info->page = NULL;
1447 BUG_ON(j > MAX_SKB_FRAGS);
1450 /* Process the RX completion indicated by rxcp when GRO is disabled */
1451 static void be_rx_compl_process(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
1454 struct be_adapter *adapter = rxo->adapter;
1455 struct net_device *netdev = adapter->netdev;
1456 struct sk_buff *skb;
1458 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1459 if (unlikely(!skb)) {
1460 rx_stats(rxo)->rx_drops_no_skbs++;
1461 be_rx_compl_discard(rxo, rxcp);
1465 skb_fill_rx_data(rxo, skb, rxcp);
1467 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1468 skb->ip_summed = CHECKSUM_UNNECESSARY;
1470 skb_checksum_none_assert(skb);
1472 skb->protocol = eth_type_trans(skb, netdev);
1473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1474 if (netdev->features & NETIF_F_RXHASH)
1475 skb->rxhash = rxcp->rss_hash;
1479 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1481 netif_receive_skb(skb);
1484 /* Process the RX completion indicated by rxcp when GRO is enabled */
1485 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1486 struct be_rx_compl_info *rxcp)
1488 struct be_adapter *adapter = rxo->adapter;
1489 struct be_rx_page_info *page_info;
1490 struct sk_buff *skb = NULL;
1491 struct be_queue_info *rxq = &rxo->q;
1492 u16 remaining, curr_frag_len;
1495 skb = napi_get_frags(napi);
1497 be_rx_compl_discard(rxo, rxcp);
1501 remaining = rxcp->pkt_size;
1502 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1503 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1505 curr_frag_len = min(remaining, rx_frag_size);
1507 /* Coalesce all frags from the same physical page in one slot */
1508 if (i == 0 || page_info->page_offset == 0) {
1509 /* First frag or fresh page */
1511 skb_frag_set_page(skb, j, page_info->page);
1512 skb_shinfo(skb)->frags[j].page_offset =
1513 page_info->page_offset;
1514 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1516 put_page(page_info->page);
1518 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1519 skb->truesize += rx_frag_size;
1520 remaining -= curr_frag_len;
1521 index_inc(&rxcp->rxq_idx, rxq->len);
1522 memset(page_info, 0, sizeof(*page_info));
1524 BUG_ON(j > MAX_SKB_FRAGS);
1526 skb_shinfo(skb)->nr_frags = j + 1;
1527 skb->len = rxcp->pkt_size;
1528 skb->data_len = rxcp->pkt_size;
1529 skb->ip_summed = CHECKSUM_UNNECESSARY;
1530 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1531 if (adapter->netdev->features & NETIF_F_RXHASH)
1532 skb->rxhash = rxcp->rss_hash;
1535 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1537 napi_gro_frags(napi);
1540 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541 struct be_rx_compl_info *rxcp)
1544 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1548 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1564 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1566 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1569 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1572 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573 struct be_rx_compl_info *rxcp)
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1580 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1596 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1598 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1602 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1606 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1608 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1609 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1610 struct be_adapter *adapter = rxo->adapter;
1612 /* For checking the valid bit, it is OK to use either definition as the
1613 * valid bit is at the same position in both v0 and v1 Rx compl */
1614 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1618 be_dws_le_to_cpu(compl, sizeof(*compl));
1620 if (adapter->be3_native)
1621 be_parse_rx_compl_v1(compl, rxcp);
1623 be_parse_rx_compl_v0(compl, rxcp);
1629 /* vlanf could be wrongly set in some cards;
1630 * ignore it if vtm is not set */
1631 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1634 if (!lancer_chip(adapter))
1635 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1637 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1638 !adapter->vlan_tag[rxcp->vlan_tag])
1642 /* As the compl has been parsed, reset it; we won't touch it again */
1643 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1645 queue_tail_inc(&rxo->cq);
1649 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1651 u32 order = get_order(size);
1655 return alloc_pages(gfp, order);
1659 * Allocate a page, split it to fragments of size rx_frag_size and post as
1660 * receive buffers to BE
1662 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1664 struct be_adapter *adapter = rxo->adapter;
1665 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1666 struct be_queue_info *rxq = &rxo->q;
1667 struct page *pagep = NULL;
1668 struct be_eth_rx_d *rxd;
1669 u64 page_dmaaddr = 0, frag_dmaaddr;
1670 u32 posted, page_offset = 0;
1672 page_info = &rxo->page_info_tbl[rxq->head];
1673 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1675 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1676 if (unlikely(!pagep)) {
1677 rx_stats(rxo)->rx_post_fail++;
1680 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1681 0, adapter->big_page_size,
1683 page_info->page_offset = 0;
1686 page_info->page_offset = page_offset + rx_frag_size;
1688 page_offset = page_info->page_offset;
1689 page_info->page = pagep;
1690 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1691 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1693 rxd = queue_head_node(rxq);
1694 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1695 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1697 /* Any space left in the current big page for another frag? */
1698 if ((page_offset + rx_frag_size + rx_frag_size) >
1699 adapter->big_page_size) {
1701 page_info->last_page_user = true;
1704 prev_page_info = page_info;
1705 queue_head_inc(rxq);
1706 page_info = &rxo->page_info_tbl[rxq->head];
1709 prev_page_info->last_page_user = true;
1712 atomic_add(posted, &rxq->used);
1713 be_rxq_notify(adapter, rxq->id, posted);
1714 } else if (atomic_read(&rxq->used) == 0) {
1715 /* Let be_worker replenish when memory is available */
1716 rxo->rx_post_starved = true;
1720 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1722 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1724 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1728 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1730 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1732 queue_tail_inc(tx_cq);
1736 static u16 be_tx_compl_process(struct be_adapter *adapter,
1737 struct be_tx_obj *txo, u16 last_index)
1739 struct be_queue_info *txq = &txo->q;
1740 struct be_eth_wrb *wrb;
1741 struct sk_buff **sent_skbs = txo->sent_skb_list;
1742 struct sk_buff *sent_skb;
1743 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1744 bool unmap_skb_hdr = true;
1746 sent_skb = sent_skbs[txq->tail];
1748 sent_skbs[txq->tail] = NULL;
1750 /* skip header wrb */
1751 queue_tail_inc(txq);
1754 cur_index = txq->tail;
1755 wrb = queue_tail_node(txq);
1756 unmap_tx_frag(&adapter->pdev->dev, wrb,
1757 (unmap_skb_hdr && skb_headlen(sent_skb)));
1758 unmap_skb_hdr = false;
1761 queue_tail_inc(txq);
1762 } while (cur_index != last_index);
1764 kfree_skb(sent_skb);
1768 /* Return the number of events in the event queue */
1769 static inline int events_get(struct be_eq_obj *eqo)
1771 struct be_eq_entry *eqe;
1775 eqe = queue_tail_node(&eqo->q);
1782 queue_tail_inc(&eqo->q);
1788 /* Leaves the EQ in disarmed state */
1789 static void be_eq_clean(struct be_eq_obj *eqo)
1791 int num = events_get(eqo);
1793 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1798 struct be_rx_page_info *page_info;
1799 struct be_queue_info *rxq = &rxo->q;
1800 struct be_queue_info *rx_cq = &rxo->cq;
1801 struct be_rx_compl_info *rxcp;
1802 struct be_adapter *adapter = rxo->adapter;
1806 /* Consume pending rx completions.
1807 * Wait for the flush completion (identified by zero num_rcvd)
1808 * to arrive. Notify CQ even when there are no more CQ entries
1809 * for HW to flush partially coalesced CQ entries.
1810 * In Lancer, there is no need to wait for flush compl.
1813 rxcp = be_rx_compl_get(rxo);
1815 if (lancer_chip(adapter))
1818 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1819 dev_warn(&adapter->pdev->dev,
1820 "did not receive flush compl\n");
1823 be_cq_notify(adapter, rx_cq->id, true, 0);
1826 be_rx_compl_discard(rxo, rxcp);
1827 be_cq_notify(adapter, rx_cq->id, false, 1);
1828 if (rxcp->num_rcvd == 0)
1833 /* After cleanup, leave the CQ in unarmed state */
1834 be_cq_notify(adapter, rx_cq->id, false, 0);
1836 /* Then free posted rx buffers that were not used */
1837 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1838 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1839 page_info = get_rx_page_info(rxo, tail);
1840 put_page(page_info->page);
1841 memset(page_info, 0, sizeof(*page_info));
1843 BUG_ON(atomic_read(&rxq->used));
1844 rxq->tail = rxq->head = 0;
1847 static void be_tx_compl_clean(struct be_adapter *adapter)
1849 struct be_tx_obj *txo;
1850 struct be_queue_info *txq;
1851 struct be_eth_tx_compl *txcp;
1852 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1853 struct sk_buff *sent_skb;
1855 int i, pending_txqs;
1857 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1859 pending_txqs = adapter->num_tx_qs;
1861 for_all_tx_queues(adapter, txo, i) {
1863 while ((txcp = be_tx_compl_get(&txo->cq))) {
1865 AMAP_GET_BITS(struct amap_eth_tx_compl,
1867 num_wrbs += be_tx_compl_process(adapter, txo,
1872 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1873 atomic_sub(num_wrbs, &txq->used);
1877 if (atomic_read(&txq->used) == 0)
1881 if (pending_txqs == 0 || ++timeo > 200)
1887 for_all_tx_queues(adapter, txo, i) {
1889 if (atomic_read(&txq->used))
1890 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1891 atomic_read(&txq->used));
1893 /* free posted tx for which compls will never arrive */
1894 while (atomic_read(&txq->used)) {
1895 sent_skb = txo->sent_skb_list[txq->tail];
1896 end_idx = txq->tail;
1897 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1899 index_adv(&end_idx, num_wrbs - 1, txq->len);
1900 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1901 atomic_sub(num_wrbs, &txq->used);
1906 static void be_evt_queues_destroy(struct be_adapter *adapter)
1908 struct be_eq_obj *eqo;
1911 for_all_evt_queues(adapter, eqo, i) {
1912 if (eqo->q.created) {
1914 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1916 be_queue_free(adapter, &eqo->q);
1920 static int be_evt_queues_create(struct be_adapter *adapter)
1922 struct be_queue_info *eq;
1923 struct be_eq_obj *eqo;
1926 adapter->num_evt_qs = num_irqs(adapter);
1928 for_all_evt_queues(adapter, eqo, i) {
1929 eqo->adapter = adapter;
1930 eqo->tx_budget = BE_TX_BUDGET;
1932 eqo->max_eqd = BE_MAX_EQD;
1933 eqo->enable_aic = true;
1936 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1937 sizeof(struct be_eq_entry));
1941 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1948 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1950 struct be_queue_info *q;
1952 q = &adapter->mcc_obj.q;
1954 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1955 be_queue_free(adapter, q);
1957 q = &adapter->mcc_obj.cq;
1959 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1960 be_queue_free(adapter, q);
1963 /* Must be called only after TX qs are created as MCC shares TX EQ */
1964 static int be_mcc_queues_create(struct be_adapter *adapter)
1966 struct be_queue_info *q, *cq;
1968 cq = &adapter->mcc_obj.cq;
1969 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1970 sizeof(struct be_mcc_compl)))
1973 /* Use the default EQ for MCC completions */
1974 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1977 q = &adapter->mcc_obj.q;
1978 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1979 goto mcc_cq_destroy;
1981 if (be_cmd_mccq_create(adapter, q, cq))
1987 be_queue_free(adapter, q);
1989 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1991 be_queue_free(adapter, cq);
1996 static void be_tx_queues_destroy(struct be_adapter *adapter)
1998 struct be_queue_info *q;
1999 struct be_tx_obj *txo;
2002 for_all_tx_queues(adapter, txo, i) {
2005 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2006 be_queue_free(adapter, q);
2010 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2011 be_queue_free(adapter, q);
2015 static int be_num_txqs_want(struct be_adapter *adapter)
2017 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2018 be_is_mc(adapter) ||
2019 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2023 return adapter->max_tx_queues;
2026 static int be_tx_cqs_create(struct be_adapter *adapter)
2028 struct be_queue_info *cq, *eq;
2030 struct be_tx_obj *txo;
2033 adapter->num_tx_qs = be_num_txqs_want(adapter);
2034 if (adapter->num_tx_qs != MAX_TX_QS) {
2036 netif_set_real_num_tx_queues(adapter->netdev,
2037 adapter->num_tx_qs);
2041 for_all_tx_queues(adapter, txo, i) {
2043 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2044 sizeof(struct be_eth_tx_compl));
2048 /* If num_evt_qs is less than num_tx_qs, then more than
2049 * one txq shares an eq
2051 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2052 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2059 static int be_tx_qs_create(struct be_adapter *adapter)
2061 struct be_tx_obj *txo;
2064 for_all_tx_queues(adapter, txo, i) {
2065 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2066 sizeof(struct be_eth_wrb));
2070 status = be_cmd_txq_create(adapter, txo);
2075 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2076 adapter->num_tx_qs);
2080 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2082 struct be_queue_info *q;
2083 struct be_rx_obj *rxo;
2086 for_all_rx_queues(adapter, rxo, i) {
2089 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2090 be_queue_free(adapter, q);
2094 static int be_rx_cqs_create(struct be_adapter *adapter)
2096 struct be_queue_info *eq, *cq;
2097 struct be_rx_obj *rxo;
2100 /* We'll create as many RSS rings as there are irqs.
2101 * But when there's only one irq there's no use creating RSS rings
2103 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2104 num_irqs(adapter) + 1 : 1;
2105 if (adapter->num_rx_qs != MAX_RX_QS) {
2107 netif_set_real_num_rx_queues(adapter->netdev,
2108 adapter->num_rx_qs);
2112 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
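/* Worked example: with the default rx_frag_size of 2048 and 4 KiB
 * pages, get_order(2048) is 0, so big_page_size equals PAGE_SIZE and
 * each page is carved into two RX fragments; with rx_frag_size = 8192,
 * big_page_size becomes 8 KiB with one fragment per allocation.
 */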
2113 for_all_rx_queues(adapter, rxo, i) {
2114 rxo->adapter = adapter;
2116 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2117 sizeof(struct be_eth_rx_compl));
2121 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2122 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2127 dev_info(&adapter->pdev->dev,
2128 "created %d RSS queue(s) and 1 default RX queue\n",
2129 adapter->num_rx_qs - 1);
2133 static irqreturn_t be_intx(int irq, void *dev)
2135 struct be_eq_obj *eqo = dev;
2136 struct be_adapter *adapter = eqo->adapter;
2139 /* IRQ is not expected when NAPI is scheduled as the EQ
2140 * will not be armed.
2141 * But, this can happen on Lancer INTx where it takes
2142 * a while to de-assert INTx or in BE2 where occasionally
2143 * an interrupt may be raised even when EQ is unarmed.
2144 * If NAPI is already scheduled, then counting & notifying
2145 * events will orphan them.
2147 if (napi_schedule_prep(&eqo->napi)) {
2148 num_evts = events_get(eqo);
2149 __napi_schedule(&eqo->napi);
2151 eqo->spurious_intr = 0;
2153 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2155 /* Return IRQ_HANDLED only for the first spurious intr
2156 * after a valid intr to stop the kernel from branding
2157 * this irq as a bad one!
2159 if (num_evts || eqo->spurious_intr++ == 0)
2165 static irqreturn_t be_msix(int irq, void *dev)
2167 struct be_eq_obj *eqo = dev;
2169 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2170 napi_schedule(&eqo->napi);
2174 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2176 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2179 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2182 struct be_adapter *adapter = rxo->adapter;
2183 struct be_queue_info *rx_cq = &rxo->cq;
2184 struct be_rx_compl_info *rxcp;
2187 for (work_done = 0; work_done < budget; work_done++) {
2188 rxcp = be_rx_compl_get(rxo);
2192 /* Is it a flush compl that has no data */
2193 if (unlikely(rxcp->num_rcvd == 0))
2196 /* Discard compl with partial DMA (Lancer B0) */
2197 if (unlikely(!rxcp->pkt_size)) {
2198 be_rx_compl_discard(rxo, rxcp);
2202 /* On BE, drop pkts that arrive due to imperfect filtering in
2203 * promiscuous mode on some SKUs
2205 if (unlikely(rxcp->port != adapter->port_num &&
2206 !lancer_chip(adapter))) {
2207 be_rx_compl_discard(rxo, rxcp);
2212 be_rx_compl_process_gro(rxo, napi, rxcp);
2214 be_rx_compl_process(rxo, rxcp);
2216 be_rx_stats_update(rxo, rxcp);
2220 be_cq_notify(adapter, rx_cq->id, true, work_done);
2222 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2223 be_post_rx_frags(rxo, GFP_ATOMIC);
2229 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2230 int budget, int idx)
2232 struct be_eth_tx_compl *txcp;
2233 int num_wrbs = 0, work_done;
2235 for (work_done = 0; work_done < budget; work_done++) {
2236 txcp = be_tx_compl_get(&txo->cq);
2239 num_wrbs += be_tx_compl_process(adapter, txo,
2240 AMAP_GET_BITS(struct amap_eth_tx_compl,
2245 be_cq_notify(adapter, txo->cq.id, true, work_done);
2246 atomic_sub(num_wrbs, &txo->q.used);
2248 /* As Tx wrbs have been freed up, wake up netdev queue
2249 * if it was stopped due to lack of tx wrbs. */
2250 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2251 atomic_read(&txo->q.used) < txo->q.len / 2) {
2252 netif_wake_subqueue(adapter->netdev, idx);
2255 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2256 tx_stats(txo)->tx_compl += work_done;
2257 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2259 return (work_done < budget); /* Done */
2262 int be_poll(struct napi_struct *napi, int budget)
2264 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2265 struct be_adapter *adapter = eqo->adapter;
2266 int max_work = 0, work, i, num_evts;
2269 num_evts = events_get(eqo);
2271 /* Process all TXQs serviced by this EQ */
2272 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2273 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2279 /* This loop will iterate twice for EQ0 in which
2280 * completions of the last RXQ (the default one) are also processed.
2281 * For other EQs the loop iterates only once.
2283 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2284 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2285 max_work = max(work, max_work);
2288 if (is_mcc_eqo(eqo))
2289 be_process_mcc(adapter);
2291 if (max_work < budget) {
2292 napi_complete(napi);
2293 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2295 /* As we'll continue in polling mode, count and clear events */
2296 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
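/* Sketch of the EQ-to-queue striding used in be_poll() above, assuming
 * num_evt_qs = 4 and num_rx_qs = 5 (4 RSS queues + 1 default): starting
 * at i = eqo->idx and stepping by num_evt_qs, EQ0 services RXQ0 and RXQ4
 * (the default queue), while EQ1..EQ3 service RXQ1..RXQ3; TXQs are
 * spread over the EQs the same way.
 */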
2301 void be_detect_error(struct be_adapter *adapter)
2303 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2304 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2307 if (be_hw_error(adapter))
2310 if (lancer_chip(adapter)) {
2311 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2312 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313 sliport_err1 = ioread32(adapter->db +
2314 SLIPORT_ERROR1_OFFSET);
2315 sliport_err2 = ioread32(adapter->db +
2316 SLIPORT_ERROR2_OFFSET);
2319 pci_read_config_dword(adapter->pdev,
2320 PCICFG_UE_STATUS_LOW, &ue_lo);
2321 pci_read_config_dword(adapter->pdev,
2322 PCICFG_UE_STATUS_HIGH, &ue_hi);
2323 pci_read_config_dword(adapter->pdev,
2324 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2325 pci_read_config_dword(adapter->pdev,
2326 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2328 ue_lo = (ue_lo & ~ue_lo_mask);
2329 ue_hi = (ue_hi & ~ue_hi_mask);
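/* Bits set in the UE mask registers are error sources the firmware wants
 * ignored; clearing them leaves only the reportable bits. E.g. ue_lo = 0x6
 * with ue_lo_mask = 0x2 yields an effective ue_lo of 0x4, so only that
 * bit's description is printed by the loops below.
 */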
2332 /* On certain platforms BE hardware can indicate spurious UEs.
2333 * In case of a real UE the h/w is allowed to stop working completely
2334 * on its own; hence hw_error is not set on UE detection.
2336 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2337 adapter->hw_error = true;
2338 dev_err(&adapter->pdev->dev,
2339 "Error detected in the card\n");
2342 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2343 dev_err(&adapter->pdev->dev,
2344 "ERR: sliport status 0x%x\n", sliport_status);
2345 dev_err(&adapter->pdev->dev,
2346 "ERR: sliport error1 0x%x\n", sliport_err1);
2347 dev_err(&adapter->pdev->dev,
2348 "ERR: sliport error2 0x%x\n", sliport_err2);
2352 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2354 dev_err(&adapter->pdev->dev,
2355 "UE: %s bit set\n", ue_status_low_desc[i]);
2360 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2362 dev_err(&adapter->pdev->dev,
2363 "UE: %s bit set\n", ue_status_hi_desc[i]);
2369 static void be_msix_disable(struct be_adapter *adapter)
2371 if (msix_enabled(adapter)) {
2372 pci_disable_msix(adapter->pdev);
2373 adapter->num_msix_vec = 0;
2377 static uint be_num_rss_want(struct be_adapter *adapter)
2381 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2382 (lancer_chip(adapter) ||
2383 (!sriov_want(adapter) && be_physfn(adapter)))) {
2384 num = adapter->max_rss_queues;
2385 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2390 static int be_msix_enable(struct be_adapter *adapter)
2392 #define BE_MIN_MSIX_VECTORS 1
2393 int i, status, num_vec, num_roce_vec = 0;
2394 struct device *dev = &adapter->pdev->dev;
2396 /* If RSS queues are not used, need a vec for default RX Q */
2397 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2398 if (be_roce_supported(adapter)) {
2399 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2400 (num_online_cpus() + 1));
2401 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2402 num_vec += num_roce_vec;
2403 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2405 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2407 for (i = 0; i < num_vec; i++)
2408 adapter->msix_entries[i].entry = i;
2410 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2413 } else if (status >= BE_MIN_MSIX_VECTORS) {
2415 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2421 dev_warn(dev, "MSI-x enable failed\n");
2422 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2423 if (!be_physfn(adapter))
2427 if (be_roce_supported(adapter)) {
2428 if (num_vec > num_roce_vec) {
2429 adapter->num_msix_vec = num_vec - num_roce_vec;
2430 adapter->num_msix_roce_vec =
2431 num_vec - adapter->num_msix_vec;
2433 adapter->num_msix_vec = num_vec;
2434 adapter->num_msix_roce_vec = 0;
2437 adapter->num_msix_vec = num_vec;
2438 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
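/* In this era of the API, pci_enable_msix() returns 0 on success or, on
 * failure, a positive count of vectors that could still be allocated
 * (negative on hard error). The retry above uses that count: asking for
 * 8 vectors on a system that can grant only 4 fails with status == 4,
 * and the second call then requests exactly 4.
 */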
2442 static inline int be_msix_vec_get(struct be_adapter *adapter,
2443 struct be_eq_obj *eqo)
2445 return adapter->msix_entries[eqo->idx].vector;
2448 static int be_msix_register(struct be_adapter *adapter)
2450 struct net_device *netdev = adapter->netdev;
2451 struct be_eq_obj *eqo;
2454 for_all_evt_queues(adapter, eqo, i) {
2455 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2456 vec = be_msix_vec_get(adapter, eqo);
2457 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2464 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2465 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2466 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2468 be_msix_disable(adapter);
2472 static int be_irq_register(struct be_adapter *adapter)
2474 struct net_device *netdev = adapter->netdev;
2477 if (msix_enabled(adapter)) {
2478 status = be_msix_register(adapter);
2481 /* INTx is not supported for VF */
2482 if (!be_physfn(adapter))
2486 /* INTx: only the first EQ is used */
2487 netdev->irq = adapter->pdev->irq;
2488 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2489 &adapter->eq_obj[0]);
2491 dev_err(&adapter->pdev->dev,
2492 "INTx request IRQ failed - err %d\n", status);
2496 adapter->isr_registered = true;
2500 static void be_irq_unregister(struct be_adapter *adapter)
2502 struct net_device *netdev = adapter->netdev;
2503 struct be_eq_obj *eqo;
2506 if (!adapter->isr_registered)
2510 if (!msix_enabled(adapter)) {
2511 free_irq(netdev->irq, &adapter->eq_obj[0]);
2516 for_all_evt_queues(adapter, eqo, i)
2517 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2520 adapter->isr_registered = false;
2523 static void be_rx_qs_destroy(struct be_adapter *adapter)
2525 struct be_queue_info *q;
2526 struct be_rx_obj *rxo;
2529 for_all_rx_queues(adapter, rxo, i) {
2532 be_cmd_rxq_destroy(adapter, q);
2533 be_rx_cq_clean(rxo);
2535 be_queue_free(adapter, q);
2539 static int be_close(struct net_device *netdev)
2541 struct be_adapter *adapter = netdev_priv(netdev);
2542 struct be_eq_obj *eqo;
2545 be_roce_dev_close(adapter);
2547 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2548 for_all_evt_queues(adapter, eqo, i)
2549 napi_disable(&eqo->napi);
2550 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2553 be_async_mcc_disable(adapter);
2555 /* Wait for all pending tx completions to arrive so that
2556 * all tx skbs are freed.
2558 be_tx_compl_clean(adapter);
2559 netif_tx_disable(netdev);
2561 be_rx_qs_destroy(adapter);
2563 for_all_evt_queues(adapter, eqo, i) {
2564 if (msix_enabled(adapter))
2565 synchronize_irq(be_msix_vec_get(adapter, eqo));
2567 synchronize_irq(netdev->irq);
2571 be_irq_unregister(adapter);
2576 static int be_rx_qs_create(struct be_adapter *adapter)
2578 struct be_rx_obj *rxo;
2582 for_all_rx_queues(adapter, rxo, i) {
2583 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2584 sizeof(struct be_eth_rx_d));
2589 /* The FW would like the default RXQ to be created first */
2590 rxo = default_rxo(adapter);
2591 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2592 adapter->if_handle, false, &rxo->rss_id);
2596 for_all_rss_queues(adapter, rxo, i) {
2597 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2598 rx_frag_size, adapter->if_handle,
2599 true, &rxo->rss_id);
2604 if (be_multi_rxq(adapter)) {
2605 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2606 for_all_rss_queues(adapter, rxo, i) {
2609 rsstable[j + i] = rxo->rss_id;
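/* Worked example of the 128-entry RSS indirection table built above,
 * assuming 4 RSS queues (num_rx_qs = 5): j advances in steps of 4 and
 * the inner loop writes the four rss_ids on each pass, so the table
 * holds id0,id1,id2,id3 repeated 32 times and hashed flows are spread
 * evenly across the RSS queues.
 */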
2612 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2613 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2615 if (!BEx_chip(adapter))
2616 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2617 RSS_ENABLE_UDP_IPV6;
2619 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2622 adapter->rss_flags = 0;
2627 /* First time posting */
2628 for_all_rx_queues(adapter, rxo, i)
2629 be_post_rx_frags(rxo, GFP_KERNEL);
2633 static int be_open(struct net_device *netdev)
2635 struct be_adapter *adapter = netdev_priv(netdev);
2636 struct be_eq_obj *eqo;
2637 struct be_rx_obj *rxo;
2638 struct be_tx_obj *txo;
2642 status = be_rx_qs_create(adapter);
2646 status = be_irq_register(adapter);
2650 for_all_rx_queues(adapter, rxo, i)
2651 be_cq_notify(adapter, rxo->cq.id, true, 0);
2653 for_all_tx_queues(adapter, txo, i)
2654 be_cq_notify(adapter, txo->cq.id, true, 0);
2656 be_async_mcc_enable(adapter);
2658 for_all_evt_queues(adapter, eqo, i) {
2659 napi_enable(&eqo->napi);
2660 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2662 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2664 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2666 be_link_status_update(adapter, link_status);
2668 netif_tx_start_all_queues(netdev);
2669 be_roce_dev_open(adapter);
2672 be_close(adapter->netdev);
2676 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2678 struct be_dma_mem cmd;
2682 memset(mac, 0, ETH_ALEN);
2684 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2685 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2686 GFP_KERNEL | __GFP_ZERO);
2691 status = pci_write_config_dword(adapter->pdev,
2692 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2694 dev_err(&adapter->pdev->dev,
2695 "Could not enable Wake-on-lan\n");
2696 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2700 status = be_cmd_enable_magic_wol(adapter,
2701 adapter->netdev->dev_addr, &cmd);
2702 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2703 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2705 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2706 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2707 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2710 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2715 * Generate a seed MAC address from the PF MAC Address using jhash.
2716 * MAC addresses for VFs are assigned incrementally starting from the seed.
2717 * These addresses are programmed in the ASIC by the PF and the VF driver
2718 * queries for the MAC address during its probe.
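/* The generate helper is not shown in this listing; presumably it
 * jhash()es the PF MAC bytes into the low-order bytes of the seed and
 * the per-VF increment is applied to the last byte inside the loop
 * below. Treat the exact byte layout as an assumption.
 */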
2720 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2725 struct be_vf_cfg *vf_cfg;
2727 be_vf_eth_addr_generate(adapter, mac);
2729 for_all_vfs(adapter, vf_cfg, vf) {
2730 if (lancer_chip(adapter)) {
2731 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2733 status = be_cmd_pmac_add(adapter, mac,
2735 &vf_cfg->pmac_id, vf + 1);
2739 dev_err(&adapter->pdev->dev,
2740 "Mac address assignment failed for VF %d\n", vf);
2742 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2749 static int be_vfs_mac_query(struct be_adapter *adapter)
2753 struct be_vf_cfg *vf_cfg;
2756 for_all_vfs(adapter, vf_cfg, vf) {
2757 be_cmd_get_mac_from_list(adapter, mac, &active,
2758 &vf_cfg->pmac_id, 0);
2760 status = be_cmd_mac_addr_query(adapter, mac, false,
2761 vf_cfg->if_handle, 0);
2764 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2769 static void be_vf_clear(struct be_adapter *adapter)
2771 struct be_vf_cfg *vf_cfg;
2774 if (be_find_vfs(adapter, ASSIGNED)) {
2775 dev_warn(&adapter->pdev->dev,
2776 "VFs are assigned to VMs: not disabling VFs\n");
2780 pci_disable_sriov(adapter->pdev);
2782 for_all_vfs(adapter, vf_cfg, vf) {
2783 if (lancer_chip(adapter))
2784 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2786 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2787 vf_cfg->pmac_id, vf + 1);
2789 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2792 kfree(adapter->vf_cfg);
2793 adapter->num_vfs = 0;
2796 static int be_clear(struct be_adapter *adapter)
2800 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2801 cancel_delayed_work_sync(&adapter->work);
2802 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2805 if (sriov_enabled(adapter))
2806 be_vf_clear(adapter);
2808 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2809 be_cmd_pmac_del(adapter, adapter->if_handle,
2810 adapter->pmac_id[i], 0);
2812 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2814 be_mcc_queues_destroy(adapter);
2815 be_rx_cqs_destroy(adapter);
2816 be_tx_queues_destroy(adapter);
2817 be_evt_queues_destroy(adapter);
2819 kfree(adapter->pmac_id);
2820 adapter->pmac_id = NULL;
2822 be_msix_disable(adapter);
2826 static int be_vfs_if_create(struct be_adapter *adapter)
2828 struct be_vf_cfg *vf_cfg;
2829 u32 cap_flags, en_flags, vf;
2832 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2833 BE_IF_FLAGS_MULTICAST;
2835 for_all_vfs(adapter, vf_cfg, vf) {
2836 if (!BE3_chip(adapter))
2837 be_cmd_get_profile_config(adapter, &cap_flags,
2840 /* If a FW profile exists, then cap_flags are updated */
2841 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2842 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2843 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2844 &vf_cfg->if_handle, vf + 1);
2852 static int be_vf_setup_init(struct be_adapter *adapter)
2854 struct be_vf_cfg *vf_cfg;
2857 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2859 if (!adapter->vf_cfg)
2862 for_all_vfs(adapter, vf_cfg, vf) {
2863 vf_cfg->if_handle = -1;
2864 vf_cfg->pmac_id = -1;
2869 static int be_vf_setup(struct be_adapter *adapter)
2871 struct be_vf_cfg *vf_cfg;
2872 u16 def_vlan, lnk_speed;
2873 int status, old_vfs, vf;
2874 struct device *dev = &adapter->pdev->dev;
2876 old_vfs = be_find_vfs(adapter, ENABLED);
2878 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2879 if (old_vfs != num_vfs)
2880 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2881 adapter->num_vfs = old_vfs;
2883 if (num_vfs > adapter->dev_num_vfs)
2884 dev_info(dev, "Device supports %d VFs and not %d\n",
2885 adapter->dev_num_vfs, num_vfs);
2886 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2887 if (!adapter->num_vfs)
2891 status = be_vf_setup_init(adapter);
2896 for_all_vfs(adapter, vf_cfg, vf) {
2897 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2902 status = be_vfs_if_create(adapter);
2908 status = be_vfs_mac_query(adapter);
2912 status = be_vf_eth_addr_config(adapter);
2917 for_all_vfs(adapter, vf_cfg, vf) {
2918 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2919 * Allow full available bandwidth
2921 if (BE3_chip(adapter) && !old_vfs)
2922 be_cmd_set_qos(adapter, 1000, vf+1);
2924 status = be_cmd_link_status_query(adapter, &lnk_speed,
2927 vf_cfg->tx_rate = lnk_speed;
2929 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2930 vf + 1, vf_cfg->if_handle);
2933 vf_cfg->def_vid = def_vlan;
2935 be_cmd_enable_vf(adapter, vf + 1);
2939 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2941 dev_err(dev, "SRIOV enable failed\n");
2942 adapter->num_vfs = 0;
2948 dev_err(dev, "VF setup failed\n");
2949 be_vf_clear(adapter);
2953 static void be_setup_init(struct be_adapter *adapter)
2955 adapter->vlan_prio_bmap = 0xff;
2956 adapter->phy.link_speed = -1;
2957 adapter->if_handle = -1;
2958 adapter->be3_native = false;
2959 adapter->promiscuous = false;
2960 if (be_physfn(adapter))
2961 adapter->cmd_privileges = MAX_PRIVILEGES;
2963 adapter->cmd_privileges = MIN_PRIVILEGES;
2966 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2967 bool *active_mac, u32 *pmac_id)
2971 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2972 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2973 if (!lancer_chip(adapter) && !be_physfn(adapter))
2976 *active_mac = false;
2981 if (lancer_chip(adapter)) {
2982 status = be_cmd_get_mac_from_list(adapter, mac,
2983 active_mac, pmac_id, 0);
2985 status = be_cmd_mac_addr_query(adapter, mac, false,
2986 if_handle, *pmac_id);
2988 } else if (be_physfn(adapter)) {
2989 /* For BE3, for PF get permanent MAC */
2990 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2991 *active_mac = false;
2993 /* For BE3, for VF get soft MAC assigned by PF */
2994 status = be_cmd_mac_addr_query(adapter, mac, false,
3001 static void be_get_resources(struct be_adapter *adapter)
3005 bool profile_present = false;
3008 if (!BEx_chip(adapter)) {
3009 status = be_cmd_get_func_config(adapter);
3011 profile_present = true;
3012 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3013 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3016 if (profile_present) {
3017 /* Sanity fixes for Lancer */
3018 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3020 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3021 BE_NUM_VLANS_SUPPORTED);
3022 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3024 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3026 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3028 adapter->max_event_queues = min_t(u16,
3029 adapter->max_event_queues,
3032 if (adapter->max_rss_queues &&
3033 adapter->max_rss_queues == adapter->max_rx_queues)
3034 adapter->max_rss_queues -= 1;
3036 if (adapter->max_event_queues < adapter->max_rss_queues)
3037 adapter->max_rss_queues = adapter->max_event_queues;
3040 if (be_physfn(adapter))
3041 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3043 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3045 if (adapter->function_mode & FLEX10_MODE)
3046 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3048 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3050 adapter->max_mcast_mac = BE_MAX_MC;
3051 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3052 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3054 adapter->max_rss_queues = (adapter->be3_native) ?
3055 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3056 adapter->max_event_queues = BE3_MAX_RSS_QS;
3058 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3059 BE_IF_FLAGS_BROADCAST |
3060 BE_IF_FLAGS_MULTICAST |
3061 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3062 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3063 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3064 BE_IF_FLAGS_PROMISCUOUS;
3066 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3067 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3070 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3072 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3074 if (BE3_chip(adapter))
3075 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3076 adapter->dev_num_vfs = dev_num_vfs;
3080 /* Routine to query per-function resource limits */
3081 static int be_get_config(struct be_adapter *adapter)
3085 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3086 &adapter->function_mode,
3087 &adapter->function_caps,
3088 &adapter->asic_rev);
3092 be_get_resources(adapter);
3094 /* primary mac needs 1 pmac entry */
3095 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3096 sizeof(u32), GFP_KERNEL);
3097 if (!adapter->pmac_id) {
3106 static int be_setup(struct be_adapter *adapter)
3108 struct device *dev = &adapter->pdev->dev;
3115 be_setup_init(adapter);
3117 if (!lancer_chip(adapter))
3118 be_cmd_req_native_mode(adapter);
3120 status = be_get_config(adapter);
3124 status = be_msix_enable(adapter);
3128 status = be_evt_queues_create(adapter);
3132 status = be_tx_cqs_create(adapter);
3136 status = be_rx_cqs_create(adapter);
3140 status = be_mcc_queues_create(adapter);
3144 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3145 /* In UMC mode the FW does not return the right privileges.
3146 * Override with privileges equivalent to those of a PF.
3148 if (be_is_mc(adapter))
3149 adapter->cmd_privileges = MAX_PRIVILEGES;
3151 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3152 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3154 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3155 en_flags |= BE_IF_FLAGS_RSS;
3157 en_flags = en_flags & adapter->if_cap_flags;
3159 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3160 &adapter->if_handle, 0);
3164 memset(mac, 0, ETH_ALEN);
3166 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3167 &active_mac, &adapter->pmac_id[0]);
3172 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3173 &adapter->pmac_id[0], 0);
3178 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3179 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3180 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3183 status = be_tx_qs_create(adapter);
3187 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3189 if (adapter->vlans_added)
3190 be_vid_config(adapter);
3192 be_set_rx_mode(adapter->netdev);
3194 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3196 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3197 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3200 if (be_physfn(adapter)) {
3201 if (adapter->dev_num_vfs)
3202 be_vf_setup(adapter);
3204 dev_warn(dev, "device doesn't support SRIOV\n");
3207 status = be_cmd_get_phy_info(adapter);
3208 if (!status && be_pause_supported(adapter))
3209 adapter->phy.fc_autoneg = 1;
3211 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3212 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3219 #ifdef CONFIG_NET_POLL_CONTROLLER
3220 static void be_netpoll(struct net_device *netdev)
3222 struct be_adapter *adapter = netdev_priv(netdev);
3223 struct be_eq_obj *eqo;
3226 for_all_evt_queues(adapter, eqo, i) {
3227 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3228 napi_schedule(&eqo->napi);
3235 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3236 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3238 static bool be_flash_redboot(struct be_adapter *adapter,
3239 const u8 *p, u32 img_start, int image_size,
3246 crc_offset = hdr_size + img_start + image_size - 4;
3250 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3253 dev_err(&adapter->pdev->dev,
3254 "could not get crc from flash, not flashing redboot\n");
3258 /* update redboot only if CRC does not match */
3259 if (!memcmp(flashed_crc, p, 4))
3265 static bool phy_flashing_required(struct be_adapter *adapter)
3267 return (adapter->phy.phy_type == TN_8022 &&
3268 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3271 static bool is_comp_in_ufi(struct be_adapter *adapter,
3272 struct flash_section_info *fsec, int type)
3274 int i = 0, img_type = 0;
3275 struct flash_section_info_g2 *fsec_g2 = NULL;
3277 if (BE2_chip(adapter))
3278 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3280 for (i = 0; i < MAX_FLASH_COMP; i++) {
3282 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3284 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3286 if (img_type == type)
3293 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3295 const struct firmware *fw)
3297 struct flash_section_info *fsec = NULL;
3298 const u8 *p = fw->data;
3301 while (p < (fw->data + fw->size)) {
3302 fsec = (struct flash_section_info *)p;
3303 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
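/* be_flash() below streams an image section to the flash in 32KB
 * chunks: intermediate chunks use a SAVE opcode (buffered by the
 * adapter) and only the final chunk uses a FLASH opcode that commits
 * the write. E.g. a 100KB section becomes three 32KB SAVE ops followed
 * by one 4KB FLASH op.
 */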
3310 static int be_flash(struct be_adapter *adapter, const u8 *img,
3311 struct be_dma_mem *flash_cmd, int optype, int img_size)
3313 u32 total_bytes = 0, flash_op, num_bytes = 0;
3315 struct be_cmd_write_flashrom *req = flash_cmd->va;
3317 total_bytes = img_size;
3318 while (total_bytes) {
3319 num_bytes = min_t(u32, 32*1024, total_bytes);
3321 total_bytes -= num_bytes;
3324 if (optype == OPTYPE_PHY_FW)
3325 flash_op = FLASHROM_OPER_PHY_FLASH;
3327 flash_op = FLASHROM_OPER_FLASH;
3329 if (optype == OPTYPE_PHY_FW)
3330 flash_op = FLASHROM_OPER_PHY_SAVE;
3332 flash_op = FLASHROM_OPER_SAVE;
3335 memcpy(req->data_buf, img, num_bytes);
3337 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3338 flash_op, num_bytes);
3340 if (status == ILLEGAL_IOCTL_REQ &&
3341 optype == OPTYPE_PHY_FW)
3343 dev_err(&adapter->pdev->dev,
3344 "cmd to write to flash rom failed.\n");
3351 /* For BE2, BE3 and BE3-R */
3352 static int be_flash_BEx(struct be_adapter *adapter,
3353 const struct firmware *fw,
3354 struct be_dma_mem *flash_cmd,
3358 int status = 0, i, filehdr_size = 0;
3359 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3360 const u8 *p = fw->data;
3361 const struct flash_comp *pflashcomp;
3362 int num_comp, redboot;
3363 struct flash_section_info *fsec = NULL;
3365 struct flash_comp gen3_flash_types[] = {
3366 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3367 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3368 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3369 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3370 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3371 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3372 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3373 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3374 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3375 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3376 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3377 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3378 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3379 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3380 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3381 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3382 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3383 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3384 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3385 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3388 struct flash_comp gen2_flash_types[] = {
3389 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3390 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3391 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3392 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3393 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3394 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3395 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3396 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3397 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3398 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3399 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3400 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3401 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3402 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3403 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3404 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3407 if (BE3_chip(adapter)) {
3408 pflashcomp = gen3_flash_types;
3409 filehdr_size = sizeof(struct flash_file_hdr_g3);
3410 num_comp = ARRAY_SIZE(gen3_flash_types);
3412 pflashcomp = gen2_flash_types;
3413 filehdr_size = sizeof(struct flash_file_hdr_g2);
3414 num_comp = ARRAY_SIZE(gen2_flash_types);
3417 /* Get flash section info*/
3418 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3420 dev_err(&adapter->pdev->dev,
3421 "Invalid Cookie. UFI corrupted ?\n");
3424 for (i = 0; i < num_comp; i++) {
3425 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3428 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3429 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3432 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3433 !phy_flashing_required(adapter))
3436 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3437 redboot = be_flash_redboot(adapter, fw->data,
3438 pflashcomp[i].offset, pflashcomp[i].size,
3439 filehdr_size + img_hdrs_size);
3445 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3446 if (p + pflashcomp[i].size > fw->data + fw->size)
3449 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3450 pflashcomp[i].size);
3452 dev_err(&adapter->pdev->dev,
3453 "Flashing section type %d failed.\n",
3454 pflashcomp[i].img_type);
3461 static int be_flash_skyhawk(struct be_adapter *adapter,
3462 const struct firmware *fw,
3463 struct be_dma_mem *flash_cmd, int num_of_images)
3465 int status = 0, i, filehdr_size = 0;
3466 int img_offset, img_size, img_optype, redboot;
3467 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3468 const u8 *p = fw->data;
3469 struct flash_section_info *fsec = NULL;
3471 filehdr_size = sizeof(struct flash_file_hdr_g3);
3472 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3474 dev_err(&adapter->pdev->dev,
3475 "Invalid Cookie. UFI corrupted ?\n");
3479 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3480 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3481 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3483 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3484 case IMAGE_FIRMWARE_iSCSI:
3485 img_optype = OPTYPE_ISCSI_ACTIVE;
3487 case IMAGE_BOOT_CODE:
3488 img_optype = OPTYPE_REDBOOT;
3490 case IMAGE_OPTION_ROM_ISCSI:
3491 img_optype = OPTYPE_BIOS;
3493 case IMAGE_OPTION_ROM_PXE:
3494 img_optype = OPTYPE_PXE_BIOS;
3496 case IMAGE_OPTION_ROM_FCoE:
3497 img_optype = OPTYPE_FCOE_BIOS;
3499 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3500 img_optype = OPTYPE_ISCSI_BACKUP;
3503 img_optype = OPTYPE_NCSI_FW;
3509 if (img_optype == OPTYPE_REDBOOT) {
3510 redboot = be_flash_redboot(adapter, fw->data,
3511 img_offset, img_size,
3512 filehdr_size + img_hdrs_size);
3518 p += filehdr_size + img_offset + img_hdrs_size;
3519 if (p + img_size > fw->data + fw->size)
3522 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3524 dev_err(&adapter->pdev->dev,
3525 "Flashing section type %d failed.\n",
3526 fsec->fsec_entry[i].type);
3533 static int lancer_wait_idle(struct be_adapter *adapter)
3535 #define SLIPORT_IDLE_TIMEOUT 30
3539 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3540 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3541 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3547 if (i == SLIPORT_IDLE_TIMEOUT)
3553 static int lancer_fw_reset(struct be_adapter *adapter)
3557 status = lancer_wait_idle(adapter);
3561 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3562 PHYSDEV_CONTROL_OFFSET);
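/* The Lancer download path below copies the image into a DMA buffer in
 * LANCER_FW_DOWNLOAD_CHUNK (32KB) pieces, writes each piece to the
 * "/prg" object, then issues a final zero-length write to commit.
 * Depending on change_status, activating the new image needs either a
 * FW reset (issued via PHYSDEV_CONTROL above) or a full system reboot.
 */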
3567 static int lancer_fw_download(struct be_adapter *adapter,
3568 const struct firmware *fw)
3570 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3571 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3572 struct be_dma_mem flash_cmd;
3573 const u8 *data_ptr = NULL;
3574 u8 *dest_image_ptr = NULL;
3575 size_t image_size = 0;
3577 u32 data_written = 0;
3583 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3584 dev_err(&adapter->pdev->dev,
3585 "FW Image not properly aligned. "
3586 "Length must be 4 byte aligned.\n");
3588 goto lancer_fw_exit;
3591 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3592 + LANCER_FW_DOWNLOAD_CHUNK;
3593 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3594 &flash_cmd.dma, GFP_KERNEL);
3595 if (!flash_cmd.va) {
3597 goto lancer_fw_exit;
3600 dest_image_ptr = flash_cmd.va +
3601 sizeof(struct lancer_cmd_req_write_object);
3602 image_size = fw->size;
3603 data_ptr = fw->data;
3605 while (image_size) {
3606 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3608 /* Copy the image chunk content. */
3609 memcpy(dest_image_ptr, data_ptr, chunk_size);
3611 status = lancer_cmd_write_object(adapter, &flash_cmd,
3613 LANCER_FW_DOWNLOAD_LOCATION,
3614 &data_written, &change_status,
3619 offset += data_written;
3620 data_ptr += data_written;
3621 image_size -= data_written;
3625 /* Commit the FW written */
3626 status = lancer_cmd_write_object(adapter, &flash_cmd,
3628 LANCER_FW_DOWNLOAD_LOCATION,
3629 &data_written, &change_status,
3633 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3636 dev_err(&adapter->pdev->dev,
3637 "Firmware load error. "
3638 "Status code: 0x%x Additional Status: 0x%x\n",
3639 status, add_status);
3640 goto lancer_fw_exit;
3643 if (change_status == LANCER_FW_RESET_NEEDED) {
3644 status = lancer_fw_reset(adapter);
3646 dev_err(&adapter->pdev->dev,
3647 "Adapter busy for FW reset.\n"
3648 "New FW will not be active.\n");
3649 goto lancer_fw_exit;
3651 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3652 dev_err(&adapter->pdev->dev,
3653 "System reboot required for new FW"
3657 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3664 #define UFI_TYPE3R 10
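/* UFI compatibility below is keyed off the first character of the build
 * string in the file header plus the chip: '4' on Skyhawk -> UFI_TYPE4,
 * '3' on BE3 -> UFI_TYPE3 (or UFI_TYPE3R when asic_type_rev == 0x10),
 * '2' on BE2 -> UFI_TYPE2; any other combination is rejected as
 * incompatible.
 */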
3666 static int be_get_ufi_type(struct be_adapter *adapter,
3667 struct flash_file_hdr_g3 *fhdr)
3670 goto be_get_ufi_exit;
3672 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3674 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3675 if (fhdr->asic_type_rev == 0x10)
3679 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3683 dev_err(&adapter->pdev->dev,
3684 "UFI and Interface are not compatible for flashing\n");
3688 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3690 struct flash_file_hdr_g3 *fhdr3;
3691 struct image_hdr *img_hdr_ptr = NULL;
3692 struct be_dma_mem flash_cmd;
3694 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3696 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3697 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3698 &flash_cmd.dma, GFP_KERNEL);
3699 if (!flash_cmd.va) {
3705 fhdr3 = (struct flash_file_hdr_g3 *)p;
3707 ufi_type = be_get_ufi_type(adapter, fhdr3);
3709 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3710 for (i = 0; i < num_imgs; i++) {
3711 img_hdr_ptr = (struct image_hdr *)(fw->data +
3712 (sizeof(struct flash_file_hdr_g3) +
3713 i * sizeof(struct image_hdr)));
3714 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3717 status = be_flash_skyhawk(adapter, fw,
3718 &flash_cmd, num_imgs);
3721 status = be_flash_BEx(adapter, fw, &flash_cmd,
3725 /* Do not flash this ufi on BE3-R cards */
3726 if (adapter->asic_rev < 0x10)
3727 status = be_flash_BEx(adapter, fw,
3732 dev_err(&adapter->pdev->dev,
3733 "Can't load BE3 UFI on BE3R\n");
3739 if (ufi_type == UFI_TYPE2)
3740 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3741 else if (ufi_type == -1)
3744 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3747 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3751 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3757 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3759 const struct firmware *fw;
3762 if (!netif_running(adapter->netdev)) {
3763 dev_err(&adapter->pdev->dev,
3764 "Firmware load not allowed (interface is down)\n");
3768 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3772 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3774 if (lancer_chip(adapter))
3775 status = lancer_fw_download(adapter, fw);
3777 status = be_fw_download(adapter, fw);
3780 release_firmware(fw);
3784 static const struct net_device_ops be_netdev_ops = {
3785 .ndo_open = be_open,
3786 .ndo_stop = be_close,
3787 .ndo_start_xmit = be_xmit,
3788 .ndo_set_rx_mode = be_set_rx_mode,
3789 .ndo_set_mac_address = be_mac_addr_set,
3790 .ndo_change_mtu = be_change_mtu,
3791 .ndo_get_stats64 = be_get_stats64,
3792 .ndo_validate_addr = eth_validate_addr,
3793 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3794 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3795 .ndo_set_vf_mac = be_set_vf_mac,
3796 .ndo_set_vf_vlan = be_set_vf_vlan,
3797 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3798 .ndo_get_vf_config = be_get_vf_config,
3799 #ifdef CONFIG_NET_POLL_CONTROLLER
3800 .ndo_poll_controller = be_netpoll,
3804 static void be_netdev_init(struct net_device *netdev)
3806 struct be_adapter *adapter = netdev_priv(netdev);
3807 struct be_eq_obj *eqo;
3810 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3811 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3812 NETIF_F_HW_VLAN_CTAG_TX;
3813 if (be_multi_rxq(adapter))
3814 netdev->hw_features |= NETIF_F_RXHASH;
3816 netdev->features |= netdev->hw_features |
3817 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3819 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3820 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3822 netdev->priv_flags |= IFF_UNICAST_FLT;
3824 netdev->flags |= IFF_MULTICAST;
3826 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3828 netdev->netdev_ops = &be_netdev_ops;
3830 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3832 for_all_evt_queues(adapter, eqo, i)
3833 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3836 static void be_unmap_pci_bars(struct be_adapter *adapter)
3839 pci_iounmap(adapter->pdev, adapter->csr);
3841 pci_iounmap(adapter->pdev, adapter->db);
3844 static int db_bar(struct be_adapter *adapter)
3846 if (lancer_chip(adapter) || !be_physfn(adapter))
3852 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3854 if (skyhawk_chip(adapter)) {
3855 adapter->roce_db.size = 4096;
3856 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3858 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3864 static int be_map_pci_bars(struct be_adapter *adapter)
3869 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3870 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3871 SLI_INTF_IF_TYPE_SHIFT;
3873 if (BEx_chip(adapter) && be_physfn(adapter)) {
3874 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3875 if (adapter->csr == NULL)
3879 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3884 be_roce_map_pci_bars(adapter);
3888 be_unmap_pci_bars(adapter);
3892 static void be_ctrl_cleanup(struct be_adapter *adapter)
3894 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3896 be_unmap_pci_bars(adapter);
3899 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3902 mem = &adapter->rx_filter;
3904 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3908 static int be_ctrl_init(struct be_adapter *adapter)
3910 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3911 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3912 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3916 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3917 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3918 SLI_INTF_FAMILY_SHIFT;
3919 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3921 status = be_map_pci_bars(adapter);
3925 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3926 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3927 mbox_mem_alloc->size,
3928 &mbox_mem_alloc->dma,
3930 if (!mbox_mem_alloc->va) {
3932 goto unmap_pci_bars;
3934 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3935 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3936 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3937 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
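/* The mailbox must be 16-byte aligned, so 16 extra bytes are allocated
 * and both the CPU and DMA addresses are rounded up with PTR_ALIGN,
 * e.g. a DMA address of 0x1008 becomes 0x1010; at most 15 bytes at the
 * start of the allocation go unused.
 */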
3939 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3940 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3942 GFP_KERNEL | __GFP_ZERO);
3943 if (rx_filter->va == NULL) {
3948 mutex_init(&adapter->mbox_lock);
3949 spin_lock_init(&adapter->mcc_lock);
3950 spin_lock_init(&adapter->mcc_cq_lock);
3952 init_completion(&adapter->flash_compl);
3953 pci_save_state(adapter->pdev);
3957 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3958 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3961 be_unmap_pci_bars(adapter);
3967 static void be_stats_cleanup(struct be_adapter *adapter)
3969 struct be_dma_mem *cmd = &adapter->stats_cmd;
3972 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3976 static int be_stats_init(struct be_adapter *adapter)
3978 struct be_dma_mem *cmd = &adapter->stats_cmd;
3980 if (lancer_chip(adapter))
3981 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3982 else if (BE2_chip(adapter))
3983 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3985 /* BE3 and Skyhawk */
3986 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3988 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3989 GFP_KERNEL | __GFP_ZERO);
3990 if (cmd->va == NULL)
3995 static void be_remove(struct pci_dev *pdev)
3997 struct be_adapter *adapter = pci_get_drvdata(pdev);
4002 be_roce_dev_remove(adapter);
4003 be_intr_set(adapter, false);
4005 cancel_delayed_work_sync(&adapter->func_recovery_work);
4007 unregister_netdev(adapter->netdev);
4011 /* tell fw we're done with firing cmds */
4012 be_cmd_fw_clean(adapter);
4014 be_stats_cleanup(adapter);
4016 be_ctrl_cleanup(adapter);
4018 pci_disable_pcie_error_reporting(pdev);
4020 pci_set_drvdata(pdev, NULL);
4021 pci_release_regions(pdev);
4022 pci_disable_device(pdev);
4024 free_netdev(adapter->netdev);
4027 bool be_is_wol_supported(struct be_adapter *adapter)
4029 return (adapter->wol_cap & BE_WOL_CAP) &&
4030 !be_is_wol_excluded(adapter);
4033 u32 be_get_fw_log_level(struct be_adapter *adapter)
4035 struct be_dma_mem extfat_cmd;
4036 struct be_fat_conf_params *cfgs;
4041 if (lancer_chip(adapter))
4044 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4045 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4046 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4049 if (!extfat_cmd.va) {
4050 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4055 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4057 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4058 sizeof(struct be_cmd_resp_hdr));
4059 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4060 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4061 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4064 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4070 static int be_get_initial_config(struct be_adapter *adapter)
4075 status = be_cmd_get_cntl_attributes(adapter);
4079 status = be_cmd_get_acpi_wol_cap(adapter);
4081 /* in case of a failure to get WOL capabilities,
4082 * check the exclusion list to determine WOL capability */
4083 if (!be_is_wol_excluded(adapter))
4084 adapter->wol_cap |= BE_WOL_CAP;
4087 if (be_is_wol_supported(adapter))
4088 adapter->wol = true;
4090 /* Must be a power of 2 or else MODULO will BUG_ON */
4091 adapter->be_get_temp_freq = 64;
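/* With be_get_temp_freq = 64 and the worker rescheduled every 1000ms,
 * the die temperature is queried roughly once a minute. The power-of-2
 * requirement suggests MODULO() is implemented as a masking operation
 * (work_counter & 63 here) with a BUG_ON for other values; the macro
 * itself is not shown in this listing.
 */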
4093 level = be_get_fw_log_level(adapter);
4094 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4099 static int lancer_recover_func(struct be_adapter *adapter)
4101 struct device *dev = &adapter->pdev->dev;
4104 status = lancer_test_and_set_rdy_state(adapter);
4108 if (netif_running(adapter->netdev))
4109 be_close(adapter->netdev);
4113 be_clear_all_error(adapter);
4115 status = be_setup(adapter);
4119 if (netif_running(adapter->netdev)) {
4120 status = be_open(adapter->netdev);
4125 dev_info(dev, "Error recovery successful\n");
4128 if (status == -EAGAIN)
4129 dev_err(dev, "Waiting for resource provisioning\n");
4131 dev_err(dev, "Error recovery failed\n");
4136 static void be_func_recovery_task(struct work_struct *work)
4138 struct be_adapter *adapter =
4139 container_of(work, struct be_adapter, func_recovery_work.work);
4142 be_detect_error(adapter);
4144 if (adapter->hw_error && lancer_chip(adapter)) {
4147 netif_device_detach(adapter->netdev);
4150 status = lancer_recover_func(adapter);
4152 netif_device_attach(adapter->netdev);
4155 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4156 * no need to attempt further recovery.
4158 if (!status || status == -EAGAIN)
4159 schedule_delayed_work(&adapter->func_recovery_work,
4160 msecs_to_jiffies(1000));
4163 static void be_worker(struct work_struct *work)
4165 struct be_adapter *adapter =
4166 container_of(work, struct be_adapter, work.work);
4167 struct be_rx_obj *rxo;
4168 struct be_eq_obj *eqo;
4171 /* when interrupts are not yet enabled, just reap any pending
4172 * mcc completions */
4173 if (!netif_running(adapter->netdev)) {
4175 be_process_mcc(adapter);
4180 if (!adapter->stats_cmd_sent) {
4181 if (lancer_chip(adapter))
4182 lancer_cmd_get_pport_stats(adapter,
4183 &adapter->stats_cmd);
4185 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4188 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4189 be_cmd_get_die_temperature(adapter);
4191 for_all_rx_queues(adapter, rxo, i) {
4192 if (rxo->rx_post_starved) {
4193 rxo->rx_post_starved = false;
4194 be_post_rx_frags(rxo, GFP_KERNEL);
4198 for_all_evt_queues(adapter, eqo, i)
4199 be_eqd_update(adapter, eqo);
4202 adapter->work_counter++;
4203 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4206 static bool be_reset_required(struct be_adapter *adapter)
4208 return be_find_vfs(adapter, ENABLED) <= 0;
4211 static char *mc_name(struct be_adapter *adapter)
4213 if (adapter->function_mode & FLEX10_MODE)
4215 else if (adapter->function_mode & VNIC_MODE)
4217 else if (adapter->function_mode & UMC_ENABLED)
4223 static inline char *func_name(struct be_adapter *adapter)
4225 return be_physfn(adapter) ? "PF" : "VF";
4228 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4231 struct be_adapter *adapter;
4232 struct net_device *netdev;
4235 status = pci_enable_device(pdev);
4239 status = pci_request_regions(pdev, DRV_NAME);
4242 pci_set_master(pdev);
4244 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4245 if (netdev == NULL) {
4249 adapter = netdev_priv(netdev);
4250 adapter->pdev = pdev;
4251 pci_set_drvdata(pdev, adapter);
4252 adapter->netdev = netdev;
4253 SET_NETDEV_DEV(netdev, &pdev->dev);
4255 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4257 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4259 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4262 netdev->features |= NETIF_F_HIGHDMA;
4264 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4266 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4271 status = pci_enable_pcie_error_reporting(pdev);
4273 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4275 status = be_ctrl_init(adapter);
4279 /* sync up with fw's ready state */
4280 if (be_physfn(adapter)) {
4281 status = be_fw_wait_ready(adapter);
4286 if (be_reset_required(adapter)) {
4287 status = be_cmd_reset_function(adapter);
4291 /* Wait for interrupts to quiesce after an FLR */
4295 /* Allow interrupts for other ULPs running on NIC function */
4296 be_intr_set(adapter, true);
4298 /* tell fw we're ready to fire cmds */
4299 status = be_cmd_fw_init(adapter);
4303 status = be_stats_init(adapter);
4307 status = be_get_initial_config(adapter);
4311 INIT_DELAYED_WORK(&adapter->work, be_worker);
4312 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4313 adapter->rx_fc = adapter->tx_fc = true;
4315 status = be_setup(adapter);
4319 be_netdev_init(netdev);
4320 status = register_netdev(netdev);
4324 be_roce_dev_add(adapter);
4326 schedule_delayed_work(&adapter->func_recovery_work,
4327 msecs_to_jiffies(1000));
4329 be_cmd_query_port_name(adapter, &port_name);
4331 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4332 func_name(adapter), mc_name(adapter), port_name);
4339 be_stats_cleanup(adapter);
4341 be_ctrl_cleanup(adapter);
4343 free_netdev(netdev);
4344 pci_set_drvdata(pdev, NULL);
4346 pci_release_regions(pdev);
4348 pci_disable_device(pdev);
4350 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4354 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4356 struct be_adapter *adapter = pci_get_drvdata(pdev);
4357 struct net_device *netdev = adapter->netdev;
4360 be_setup_wol(adapter, true);
4362 cancel_delayed_work_sync(&adapter->func_recovery_work);
4364 netif_device_detach(netdev);
4365 if (netif_running(netdev)) {
4372 pci_save_state(pdev);
4373 pci_disable_device(pdev);
4374 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4378 static int be_resume(struct pci_dev *pdev)
4381 struct be_adapter *adapter = pci_get_drvdata(pdev);
4382 struct net_device *netdev = adapter->netdev;
4384 netif_device_detach(netdev);
4386 status = pci_enable_device(pdev);
4390 pci_set_power_state(pdev, 0);
4391 pci_restore_state(pdev);
4393 /* tell fw we're ready to fire cmds */
4394 status = be_cmd_fw_init(adapter);
4399 if (netif_running(netdev)) {
4405 schedule_delayed_work(&adapter->func_recovery_work,
4406 msecs_to_jiffies(1000));
4407 netif_device_attach(netdev);
4410 be_setup_wol(adapter, false);
4416 * An FLR will stop BE from DMAing any data.
4418 static void be_shutdown(struct pci_dev *pdev)
4420 struct be_adapter *adapter = pci_get_drvdata(pdev);
4425 cancel_delayed_work_sync(&adapter->work);
4426 cancel_delayed_work_sync(&adapter->func_recovery_work);
4428 netif_device_detach(adapter->netdev);
4430 be_cmd_reset_function(adapter);
4432 pci_disable_device(pdev);
4435 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436 pci_channel_state_t state)
4438 struct be_adapter *adapter = pci_get_drvdata(pdev);
4439 struct net_device *netdev = adapter->netdev;
4441 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4443 if (!adapter->eeh_error) {
4444 adapter->eeh_error = true;
4446 cancel_delayed_work_sync(&adapter->func_recovery_work);
4449 netif_device_detach(netdev);
4450 if (netif_running(netdev))
4457 if (state == pci_channel_io_perm_failure)
4458 return PCI_ERS_RESULT_DISCONNECT;
4460 pci_disable_device(pdev);
4462 /* The error could cause the FW to trigger a flash debug dump.
4463 * Resetting the card while flash dump is in progress
4464 * can cause it not to recover; wait for it to finish.
4465 * Wait only for the first function, as it is needed only once per adapter.
4468 if (pdev->devfn == 0)
4471 return PCI_ERS_RESULT_NEED_RESET;
4474 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4476 struct be_adapter *adapter = pci_get_drvdata(pdev);
4479 dev_info(&adapter->pdev->dev, "EEH reset\n");
4481 status = pci_enable_device(pdev);
4483 return PCI_ERS_RESULT_DISCONNECT;
4485 pci_set_master(pdev);
4486 pci_set_power_state(pdev, 0);
4487 pci_restore_state(pdev);
4489 /* Check if card is ok and fw is ready */
4490 dev_info(&adapter->pdev->dev,
4491 "Waiting for FW to be ready after EEH reset\n");
4492 status = be_fw_wait_ready(adapter);
4494 return PCI_ERS_RESULT_DISCONNECT;
4496 pci_cleanup_aer_uncorrect_error_status(pdev);
4497 be_clear_all_error(adapter);
4498 return PCI_ERS_RESULT_RECOVERED;
4501 static void be_eeh_resume(struct pci_dev *pdev)
4504 struct be_adapter *adapter = pci_get_drvdata(pdev);
4505 struct net_device *netdev = adapter->netdev;
4507 dev_info(&adapter->pdev->dev, "EEH resume\n");
4509 pci_save_state(pdev);
4511 status = be_cmd_reset_function(adapter);
4515 /* tell fw we're ready to fire cmds */
4516 status = be_cmd_fw_init(adapter);
4520 status = be_setup(adapter);
4524 if (netif_running(netdev)) {
4525 status = be_open(netdev);
4530 schedule_delayed_work(&adapter->func_recovery_work,
4531 msecs_to_jiffies(1000));
4532 netif_device_attach(netdev);
4535 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4538 static const struct pci_error_handlers be_eeh_handlers = {
4539 .error_detected = be_eeh_err_detected,
4540 .slot_reset = be_eeh_reset,
4541 .resume = be_eeh_resume,
4544 static struct pci_driver be_driver = {
4546 .id_table = be_dev_ids,
4548 .remove = be_remove,
4549 .suspend = be_suspend,
4550 .resume = be_resume,
4551 .shutdown = be_shutdown,
4552 .err_handler = &be_eeh_handlers
4555 static int __init be_init_module(void)
4557 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4558 rx_frag_size != 2048) {
4559 printk(KERN_WARNING DRV_NAME
4560 " : Module param rx_frag_size must be 2048/4096/8192."
4562 rx_frag_size = 2048;
4565 return pci_register_driver(&be_driver);
4567 module_init(be_init_module);
4569 static void __exit be_exit_module(void)
4571 pci_unregister_driver(&be_driver);
4573 module_exit(be_exit_module);