/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
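/* For example, VFs are now enabled/disabled through the standard sysfs
 * SR-IOV interface (the device address below is illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 */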
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
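/* Callers pair be_queue_alloc() with be_queue_free() above. A typical
 * call-site shape (queue length and entry type here are illustrative):
 *
 *	status = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
 *				sizeof(struct be_eq_entry));
 */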
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
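/* The *_notify() helpers below ring doorbell registers in BAR space: each
 * 32-bit write packs a ring id plus a count (e.g. the number of newly
 * posted RX buffers, pending TX WRBs or popped EQ/CQ entries) into a
 * single doorbell value.
 */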
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}
/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
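/* Worked example for the 16-bit accumulation below: with *acc == 0x0001fff0
 * and a new HW reading of val == 0x0005, val < lo(*acc) means the 16-bit HW
 * counter wrapped, so the result is hi(*acc) + val + 65536 = 0x00020005.
 */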
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}
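/* A TX request is a chain of work request blocks (WRBs): one header WRB
 * followed by one fragment WRB per DMA buffer. For example, an skb with
 * linear data and two page frags consumes 1 + 1 + 2 = 4 WRBs.
 */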
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}
/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	memset(wrb, 0, sizeof(*wrb));
}
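/* Example of the priority remap in be_get_tx_vlan_tag() below: if the
 * stack hands down a tag with PCP 5 but bit 5 is clear in vlan_prio_bmap,
 * the PCP bits are rewritten with adapter->recommended_prio_bits while
 * the VID bits are left untouched.
 */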
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u16 vlan_tag;
	u8 vlan_prio;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* The hack to skip HW VLAN tagging needs evt = 1, compl = 0. When
	 * this hack is not needed, the evt bit is set while ringing the DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}
/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}
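	/* e.g. a frame padded to 60 bytes whose IP tot_len is 40 gets
	 * trimmed back to 14 + 40 = 54 bytes, removing the pad bytes that
	 * trip up the HW's checksum/tot_len handling.
	 */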
	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
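/* Taken together, the filters above decide, per packet class (broadcast,
 * multicast, ARP, DHCP, NetBIOS, IPv6 ND/RA), whether a TX packet must
 * also be relayed to the BMC sharing this port.
 */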
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
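/* Main transmit entry point. Doorbell writes are batched: while the stack
 * signals more packets via skb->xmit_more, the queued WRBs are flushed
 * only once the subqueue fills up or a packet without xmit_more arrives.
 */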
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}
static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	if (!test_bit(vid, adapter->vids))
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
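	/* For example, on a 10Gbps link link_speed is 10000, so percent_rate
	 * is 100 and the accepted rates are 100, 200, ..., 10000 Mbps.
	 */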
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, i;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	unsigned int start;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (!delta)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
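	/* Derive the interrupt delay from the aggregate pkt rate; e.g. an
	 * aggregate rate of 150,000 pkts/s yields
	 * eqd = (150000 / 15000) << 2 = 40 before the clamping below.
	 */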
	eqd = (pps / 15000) << 2;
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1979 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1981 /* L4 checksum is not reliable for non TCP/UDP packets.
1982 * Also ignore ipcksm for ipv6 pkts
1984 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1985 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
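/* RX buffers are rx_frag_size (default 2048B) slices of a larger page;
 * only the slice that is the page's last_frag unmaps the whole page,
 * earlier slices just sync their own fragment for CPU access.
 */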
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2196 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2197 struct be_rx_compl_info *rxcp)
2199 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2200 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2201 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2202 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2203 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2204 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2205 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2206 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2207 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2208 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2209 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2211 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2212 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2214 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2216 rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
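/* Editor's sketch (not in the original source): the GET_RX_COMPL_V1_BITS()
 * accessors are thin wrappers around AMAP_GET_BITS(), i.e. roughly
 *
 *     AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl)
 *
 * which extracts the named bit-field from the little-endian completion
 * descriptor after be_dws_le_to_cpu() has byte-swapped it.
 */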
2219 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2220 struct be_rx_compl_info *rxcp)
2222 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2223 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2224 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2225 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2226 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2227 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2228 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2229 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2230 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2231 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2232 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2234 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2235 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2237 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2238 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2241 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2243 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2244 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2245 struct be_adapter *adapter = rxo->adapter;
2247 /* For checking the valid bit it is OK to use either definition, as the
2248 * valid bit is at the same position in both v0 and v1 Rx compl */
2249 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2253 be_dws_le_to_cpu(compl, sizeof(*compl));
2255 if (adapter->be3_native)
2256 be_parse_rx_compl_v1(compl, rxcp);
2258 be_parse_rx_compl_v0(compl, rxcp);
2264 /* In QNQ modes, if qnq bit is not set, then the packet was
2265 * tagged only with the transparent outer vlan-tag and must
2266 * not be treated as a vlan packet by the host */
2268 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2271 if (!lancer_chip(adapter))
2272 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2274 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2275 !test_bit(rxcp->vlan_tag, adapter->vids))
2279 /* As the compl has been parsed, reset it; we won't touch it again */
2280 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2282 queue_tail_inc(&rxo->cq);
2286 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2288 u32 order = get_order(size);
2292 return alloc_pages(gfp, order);
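/* Worked example (editor's addition, assuming 4K pages and the maximum
 * rx_frag_size of 8192): big_page_size is computed in be_rx_cqs_create()
 * as (1 << get_order(rx_frag_size)) * PAGE_SIZE == 8192, so
 * get_order(8192) == 1 here and each call returns a 2-page block that
 * be_post_rx_frags() carves into rx_frag_size-sized receive buffers.
 */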
2296 * Allocate a page, split it to fragments of size rx_frag_size and post as
2297 * receive buffers to BE. */
2299 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2301 struct be_adapter *adapter = rxo->adapter;
2302 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2303 struct be_queue_info *rxq = &rxo->q;
2304 struct page *pagep = NULL;
2305 struct device *dev = &adapter->pdev->dev;
2306 struct be_eth_rx_d *rxd;
2307 u64 page_dmaaddr = 0, frag_dmaaddr;
2308 u32 posted, page_offset = 0, notify = 0;
2310 page_info = &rxo->page_info_tbl[rxq->head];
2311 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2313 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2314 if (unlikely(!pagep)) {
2315 rx_stats(rxo)->rx_post_fail++;
2318 page_dmaaddr = dma_map_page(dev, pagep, 0,
2319 adapter->big_page_size, DMA_FROM_DEVICE);
2321 if (dma_mapping_error(dev, page_dmaaddr)) {
2324 adapter->drv_stats.dma_map_errors++;
2330 page_offset += rx_frag_size;
2332 page_info->page_offset = page_offset;
2333 page_info->page = pagep;
2335 rxd = queue_head_node(rxq);
2336 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2337 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2338 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2340 /* Any space left in the current big page for another frag? */
2341 if ((page_offset + rx_frag_size + rx_frag_size) >
2342 adapter->big_page_size) {
2344 page_info->last_frag = true;
2345 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2347 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2350 prev_page_info = page_info;
2351 queue_head_inc(rxq);
2352 page_info = &rxo->page_info_tbl[rxq->head];
2355 /* Mark the last frag of a page when we break out of the above loop
2356 * with no more slots available in the RXQ */
2359 prev_page_info->last_frag = true;
2360 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2364 atomic_add(posted, &rxq->used);
2365 if (rxo->rx_post_starved)
2366 rxo->rx_post_starved = false;
2368 notify = min(MAX_NUM_POST_ERX_DB, posted);
2369 be_rxq_notify(adapter, rxq->id, notify);
2372 } else if (atomic_read(&rxq->used) == 0) {
2373 /* Let be_worker replenish when memory is available */
2374 rxo->rx_post_starved = true;
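/* Editor's note: the RXQ doorbell write in be_rxq_notify() encodes the
 * count of newly posted buffers, which is capped at MAX_NUM_POST_ERX_DB
 * per write above; larger postings are rung in multiple chunks.
 */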
2378 static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
2380 struct be_queue_info *tx_cq = &txo->cq;
2381 struct be_tx_compl_info *txcp = &txo->txcp;
2382 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2384 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2387 /* Ensure load ordering of valid bit dword and other dwords below */
2389 be_dws_le_to_cpu(compl, sizeof(*compl));
2391 txcp->status = GET_TX_COMPL_BITS(status, compl);
2392 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2394 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2395 queue_tail_inc(tx_cq);
2399 static u16 be_tx_compl_process(struct be_adapter *adapter,
2400 struct be_tx_obj *txo, u16 last_index)
2402 struct sk_buff **sent_skbs = txo->sent_skb_list;
2403 struct be_queue_info *txq = &txo->q;
2404 u16 frag_index, num_wrbs = 0;
2405 struct sk_buff *skb = NULL;
2406 bool unmap_skb_hdr = false;
2407 struct be_eth_wrb *wrb;
2410 if (sent_skbs[txq->tail]) {
2411 /* Free skb from prev req */
2413 if (skb) dev_consume_skb_any(skb);
2414 skb = sent_skbs[txq->tail];
2415 sent_skbs[txq->tail] = NULL;
2416 queue_tail_inc(txq); /* skip hdr wrb */
2418 unmap_skb_hdr = true;
2420 wrb = queue_tail_node(txq);
2421 frag_index = txq->tail;
2422 unmap_tx_frag(&adapter->pdev->dev, wrb,
2423 (unmap_skb_hdr && skb_headlen(skb)));
2424 unmap_skb_hdr = false;
2425 queue_tail_inc(txq);
2427 } while (frag_index != last_index);
2428 dev_consume_skb_any(skb);
2433 /* Return the number of events in the event queue */
2434 static inline int events_get(struct be_eq_obj *eqo)
2436 struct be_eq_entry *eqe;
2440 eqe = queue_tail_node(&eqo->q);
2447 queue_tail_inc(&eqo->q);
2453 /* Leaves the EQ in a disarmed state */
2454 static void be_eq_clean(struct be_eq_obj *eqo)
2456 int num = events_get(eqo);
2458 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2461 /* Free posted rx buffers that were not used */
2462 static void be_rxq_clean(struct be_rx_obj *rxo)
2464 struct be_queue_info *rxq = &rxo->q;
2465 struct be_rx_page_info *page_info;
2467 while (atomic_read(&rxq->used) > 0) {
2468 page_info = get_rx_page_info(rxo);
2469 put_page(page_info->page);
2470 memset(page_info, 0, sizeof(*page_info));
2472 BUG_ON(atomic_read(&rxq->used));
2477 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2479 struct be_queue_info *rx_cq = &rxo->cq;
2480 struct be_rx_compl_info *rxcp;
2481 struct be_adapter *adapter = rxo->adapter;
2484 /* Consume pending rx completions.
2485 * Wait for the flush completion (identified by zero num_rcvd)
2486 * to arrive. Notify CQ even when there are no more CQ entries
2487 * for HW to flush partially coalesced CQ entries.
2488 * In Lancer, there is no need to wait for flush compl. */
2491 rxcp = be_rx_compl_get(rxo);
2493 if (lancer_chip(adapter))
2496 if (flush_wait++ > 50 ||
2497 be_check_error(adapter, BE_ERROR_HW)) {
2499 dev_warn(&adapter->pdev->dev,
2500 "did not receive flush compl\n");
2503 be_cq_notify(adapter, rx_cq->id, true, 0);
2506 be_rx_compl_discard(rxo, rxcp);
2507 be_cq_notify(adapter, rx_cq->id, false, 1);
2508 if (rxcp->num_rcvd == 0)
2513 /* After cleanup, leave the CQ in unarmed state */
2514 be_cq_notify(adapter, rx_cq->id, false, 0);
2517 static void be_tx_compl_clean(struct be_adapter *adapter)
2519 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2520 struct device *dev = &adapter->pdev->dev;
2521 struct be_tx_compl_info *txcp;
2522 struct be_queue_info *txq;
2523 struct be_tx_obj *txo;
2524 int i, pending_txqs;
2526 /* Stop polling for compls when HW has been silent for 10ms */
2528 pending_txqs = adapter->num_tx_qs;
2530 for_all_tx_queues(adapter, txo, i) {
2534 while ((txcp = be_tx_compl_get(txo))) {
2536 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
2541 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2542 atomic_sub(num_wrbs, &txq->used);
2545 if (!be_is_tx_compl_pending(txo))
2549 if (pending_txqs == 0 || ++timeo > 10 ||
2550 be_check_error(adapter, BE_ERROR_HW))
2556 /* Free enqueued TX that was never notified to HW */
2557 for_all_tx_queues(adapter, txo, i) {
2560 if (atomic_read(&txq->used)) {
2561 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2562 i, atomic_read(&txq->used));
2563 notified_idx = txq->tail;
2564 end_idx = txq->tail;
2565 index_adv(&end_idx, atomic_read(&txq->used) - 1, txq->len);
2567 /* Use the tx-compl process logic to handle requests
2568 * that were not sent to the HW. */
2570 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2571 atomic_sub(num_wrbs, &txq->used);
2572 BUG_ON(atomic_read(&txq->used));
2573 txo->pend_wrb_cnt = 0;
2574 /* Since hw was never notified of these requests, reset TXQ indices */
2577 txq->head = notified_idx;
2578 txq->tail = notified_idx;
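/* Editor's note: resetting both head and tail to notified_idx above
 * silently discards the never-notified WRBs; since the HW was never
 * told about them, no doorbell or flush is needed for this rewind.
 */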
2583 static void be_evt_queues_destroy(struct be_adapter *adapter)
2585 struct be_eq_obj *eqo;
2588 for_all_evt_queues(adapter, eqo, i) {
2589 if (eqo->q.created) {
2591 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2592 napi_hash_del(&eqo->napi);
2593 netif_napi_del(&eqo->napi);
2594 free_cpumask_var(eqo->affinity_mask);
2596 be_queue_free(adapter, &eqo->q);
2600 static int be_evt_queues_create(struct be_adapter *adapter)
2602 struct be_queue_info *eq;
2603 struct be_eq_obj *eqo;
2604 struct be_aic_obj *aic;
2607 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2608 adapter->cfg_num_qs);
2610 for_all_evt_queues(adapter, eqo, i) {
2611 int numa_node = dev_to_node(&adapter->pdev->dev);
2613 aic = &adapter->aic_obj[i];
2614 eqo->adapter = adapter;
2616 aic->max_eqd = BE_MAX_EQD;
2620 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2621 sizeof(struct be_eq_entry));
2625 rc = be_cmd_eq_create(adapter, eqo);
2629 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2631 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2632 eqo->affinity_mask);
2633 netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
2639 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2641 struct be_queue_info *q;
2643 q = &adapter->mcc_obj.q;
2645 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2646 be_queue_free(adapter, q);
2648 q = &adapter->mcc_obj.cq;
2650 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2651 be_queue_free(adapter, q);
2654 /* Must be called only after TX qs are created as MCC shares TX EQ */
2655 static int be_mcc_queues_create(struct be_adapter *adapter)
2657 struct be_queue_info *q, *cq;
2659 cq = &adapter->mcc_obj.cq;
2660 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2661 sizeof(struct be_mcc_compl)))
2664 /* Use the default EQ for MCC completions */
2665 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2668 q = &adapter->mcc_obj.q;
2669 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2670 goto mcc_cq_destroy;
2672 if (be_cmd_mccq_create(adapter, q, cq))
2678 be_queue_free(adapter, q);
2680 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2682 be_queue_free(adapter, cq);
2687 static void be_tx_queues_destroy(struct be_adapter *adapter)
2689 struct be_queue_info *q;
2690 struct be_tx_obj *txo;
2693 for_all_tx_queues(adapter, txo, i) {
2696 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2697 be_queue_free(adapter, q);
2701 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2702 be_queue_free(adapter, q);
2706 static int be_tx_qs_create(struct be_adapter *adapter)
2708 struct be_queue_info *cq;
2709 struct be_tx_obj *txo;
2710 struct be_eq_obj *eqo;
2713 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2715 for_all_tx_queues(adapter, txo, i) {
2717 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2718 sizeof(struct be_eth_tx_compl));
2722 u64_stats_init(&txo->stats.sync);
2723 u64_stats_init(&txo->stats.sync_compl);
2725 /* If num_evt_qs is less than num_tx_qs, then more than
2726 * one txq shares an EQ */
2728 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2729 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2733 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2734 sizeof(struct be_eth_wrb));
2738 status = be_cmd_txq_create(adapter, txo);
2742 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask, i);
2746 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2747 adapter->num_tx_qs);
2751 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2753 struct be_queue_info *q;
2754 struct be_rx_obj *rxo;
2757 for_all_rx_queues(adapter, rxo, i) {
2760 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2761 be_queue_free(adapter, q);
2765 static int be_rx_cqs_create(struct be_adapter *adapter)
2767 struct be_queue_info *eq, *cq;
2768 struct be_rx_obj *rxo;
2771 /* We can create as many RSS rings as there are EQs. */
2772 adapter->num_rss_qs = adapter->num_evt_qs;
2774 /* We'll use RSS only if at least 2 RSS rings are supported. */
2775 if (adapter->num_rss_qs <= 1)
2776 adapter->num_rss_qs = 0;
2778 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2780 /* When the interface is not capable of RSS rings (and there is no
2781 * need to create a default RXQ) we'll still need one RXQ */
2783 if (adapter->num_rx_qs == 0)
2784 adapter->num_rx_qs = 1;
2786 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2787 for_all_rx_queues(adapter, rxo, i) {
2788 rxo->adapter = adapter;
2790 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2791 sizeof(struct be_eth_rx_compl));
2795 u64_stats_init(&rxo->stats.sync);
2796 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2797 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2802 dev_info(&adapter->pdev->dev,
2803 "created %d RX queue(s)\n", adapter->num_rx_qs);
2807 static irqreturn_t be_intx(int irq, void *dev)
2809 struct be_eq_obj *eqo = dev;
2810 struct be_adapter *adapter = eqo->adapter;
2813 /* IRQ is not expected when NAPI is scheduled as the EQ
2814 * will not be armed.
2815 * But, this can happen on Lancer INTx where it takes
2816 * a while to de-assert INTx or in BE2 where occasionally
2817 * an interrupt may be raised even when EQ is unarmed.
2818 * If NAPI is already scheduled, then counting & notifying
2819 * events will orphan them. */
2821 if (napi_schedule_prep(&eqo->napi)) {
2822 num_evts = events_get(eqo);
2823 __napi_schedule(&eqo->napi);
2825 eqo->spurious_intr = 0;
2827 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
2829 /* Return IRQ_HANDLED only for the first spurious intr
2830 * after a valid intr to stop the kernel from branding
2831 * this irq as a bad one! */
2833 if (num_evts || eqo->spurious_intr++ == 0)
2839 static irqreturn_t be_msix(int irq, void *dev)
2841 struct be_eq_obj *eqo = dev;
2843 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
2844 napi_schedule(&eqo->napi);
2848 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2850 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2853 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2854 int budget, int polling)
2856 struct be_adapter *adapter = rxo->adapter;
2857 struct be_queue_info *rx_cq = &rxo->cq;
2858 struct be_rx_compl_info *rxcp;
2860 u32 frags_consumed = 0;
2862 for (work_done = 0; work_done < budget; work_done++) {
2863 rxcp = be_rx_compl_get(rxo);
2867 /* Is it a flush compl that has no data */
2868 if (unlikely(rxcp->num_rcvd == 0))
2871 /* Discard compl with partial DMA Lancer B0 */
2872 if (unlikely(!rxcp->pkt_size)) {
2873 be_rx_compl_discard(rxo, rxcp);
2877 /* On BE drop pkts that arrive due to imperfect filtering in
2878 * promiscuous mode on some SKUs */
2880 if (unlikely(rxcp->port != adapter->port_num &&
2881 !lancer_chip(adapter))) {
2882 be_rx_compl_discard(rxo, rxcp);
2886 /* Don't do gro when we're busy_polling */
2887 if (do_gro(rxcp) && polling != BUSY_POLLING)
2888 be_rx_compl_process_gro(rxo, napi, rxcp);
2890 be_rx_compl_process(rxo, napi, rxcp);
2893 frags_consumed += rxcp->num_rcvd;
2894 be_rx_stats_update(rxo, rxcp);
2898 be_cq_notify(adapter, rx_cq->id, true, work_done);
2900 /* When an rx-obj gets into post_starved state, just
2901 * let be_worker do the posting. */
2903 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2904 !rxo->rx_post_starved)
2905 be_post_rx_frags(rxo, GFP_ATOMIC,
2906 max_t(u32, MAX_RX_POST, frags_consumed));
2913 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2916 case BE_TX_COMP_HDR_PARSE_ERR:
2917 tx_stats(txo)->tx_hdr_parse_err++;
2919 case BE_TX_COMP_NDMA_ERR:
2920 tx_stats(txo)->tx_dma_err++;
2922 case BE_TX_COMP_ACL_ERR:
2923 tx_stats(txo)->tx_spoof_check_err++;
2928 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2931 case LANCER_TX_COMP_LSO_ERR:
2932 tx_stats(txo)->tx_tso_err++;
2934 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2935 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2936 tx_stats(txo)->tx_spoof_check_err++;
2938 case LANCER_TX_COMP_QINQ_ERR:
2939 tx_stats(txo)->tx_qinq_err++;
2941 case LANCER_TX_COMP_PARITY_ERR:
2942 tx_stats(txo)->tx_internal_parity_err++;
2944 case LANCER_TX_COMP_DMA_ERR:
2945 tx_stats(txo)->tx_dma_err++;
2950 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2953 int num_wrbs = 0, work_done = 0;
2954 struct be_tx_compl_info *txcp;
2956 while ((txcp = be_tx_compl_get(txo))) {
2957 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
2961 if (lancer_chip(adapter))
2962 lancer_update_tx_err(txo, txcp->status);
2964 be_update_tx_err(txo, txcp->status);
2969 be_cq_notify(adapter, txo->cq.id, true, work_done);
2970 atomic_sub(num_wrbs, &txo->q.used);
2972 /* As Tx wrbs have been freed up, wake up netdev queue
2973 * if it was stopped due to lack of tx wrbs. */
2974 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2975 be_can_txq_wake(txo)) {
2976 netif_wake_subqueue(adapter->netdev, idx);
2979 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2980 tx_stats(txo)->tx_compl += work_done;
2981 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2985 #ifdef CONFIG_NET_RX_BUSY_POLL
2986 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2990 spin_lock(&eqo->lock); /* BH is already disabled */
2991 if (eqo->state & BE_EQ_LOCKED) {
2992 WARN_ON(eqo->state & BE_EQ_NAPI);
2993 eqo->state |= BE_EQ_NAPI_YIELD;
2996 eqo->state = BE_EQ_NAPI;
2998 spin_unlock(&eqo->lock);
3002 static inline void be_unlock_napi(struct be_eq_obj *eqo)
3004 spin_lock(&eqo->lock); /* BH is already disabled */
3006 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3007 eqo->state = BE_EQ_IDLE;
3009 spin_unlock(&eqo->lock);
3012 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3016 spin_lock_bh(&eqo->lock);
3017 if (eqo->state & BE_EQ_LOCKED) {
3018 eqo->state |= BE_EQ_POLL_YIELD;
3021 eqo->state |= BE_EQ_POLL;
3023 spin_unlock_bh(&eqo->lock);
3027 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3029 spin_lock_bh(&eqo->lock);
3031 WARN_ON(eqo->state & (BE_EQ_NAPI));
3032 eqo->state = BE_EQ_IDLE;
3034 spin_unlock_bh(&eqo->lock);
3037 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3039 spin_lock_init(&eqo->lock);
3040 eqo->state = BE_EQ_IDLE;
3043 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3047 /* It's enough to just acquire napi lock on the eqo to stop
3048 * be_busy_poll() from processing any queues. */
3050 while (!be_lock_napi(eqo))
3056 #else /* CONFIG_NET_RX_BUSY_POLL */
3058 static inline bool be_lock_napi(struct be_eq_obj *eqo)
3063 static inline void be_unlock_napi(struct be_eq_obj *eqo)
3067 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3072 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3076 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3080 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3083 #endif /* CONFIG_NET_RX_BUSY_POLL */
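/* Editor's sketch of the eqo->state protocol above (assumed reading of
 * the flags): NAPI and busy-poll contexts compete for one EQ, so each
 * takes eqo->lock, claims ownership (BE_EQ_NAPI or BE_EQ_POLL), and on
 * contention records a *_YIELD flag instead of spinning, e.g.:
 *
 *     if (!be_lock_busy_poll(eqo))
 *             return LL_FLUSH_BUSY;  // NAPI currently owns this EQ
 */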
3085 int be_poll(struct napi_struct *napi, int budget)
3087 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3088 struct be_adapter *adapter = eqo->adapter;
3089 int max_work = 0, work, i, num_evts;
3090 struct be_rx_obj *rxo;
3091 struct be_tx_obj *txo;
3094 num_evts = events_get(eqo);
3096 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3097 be_process_tx(adapter, txo, i);
3099 if (be_lock_napi(eqo)) {
3100 /* This loop will iterate twice for EQ0 in which
3101 * completions of the last RXQ (default one) are also processed
3102 * For other EQs the loop iterates only once. */
3104 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3105 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3106 max_work = max(work, max_work);
3108 be_unlock_napi(eqo);
3113 if (is_mcc_eqo(eqo))
3114 be_process_mcc(adapter);
3116 if (max_work < budget) {
3117 napi_complete(napi);
3119 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3120 * delay via a delay multiplier encoding value. */
3122 if (skyhawk_chip(adapter))
3123 mult_enc = be_get_eq_delay_mult_enc(eqo);
3125 be_eq_notify(adapter, eqo->q.id, true, false, num_evts, mult_enc);
3128 /* As we'll continue in polling mode, count and clear events */
3129 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
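/* Editor's note on the two be_eq_notify() calls above, as used in this
 * file: the arguments are (adapter, qid, arm, clear, num_popped, eqd).
 * The completion path re-arms the EQ so a fresh interrupt can fire; the
 * still-polling path only acks num_evts entries and leaves the EQ
 * unarmed until NAPI is done.
 */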
3134 #ifdef CONFIG_NET_RX_BUSY_POLL
3135 static int be_busy_poll(struct napi_struct *napi)
3137 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3138 struct be_adapter *adapter = eqo->adapter;
3139 struct be_rx_obj *rxo;
3142 if (!be_lock_busy_poll(eqo))
3143 return LL_FLUSH_BUSY;
3145 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3146 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3151 be_unlock_busy_poll(eqo);
3156 void be_detect_error(struct be_adapter *adapter)
3158 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3159 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3161 struct device *dev = &adapter->pdev->dev;
3163 if (be_check_error(adapter, BE_ERROR_HW))
3166 if (lancer_chip(adapter)) {
3167 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3168 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3169 be_set_error(adapter, BE_ERROR_UE);
3170 sliport_err1 = ioread32(adapter->db +
3171 SLIPORT_ERROR1_OFFSET);
3172 sliport_err2 = ioread32(adapter->db +
3173 SLIPORT_ERROR2_OFFSET);
3174 /* Do not log error messages if it's a FW reset */
3175 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3176 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3177 dev_info(dev, "Firmware update in progress\n");
3179 dev_err(dev, "Error detected in the card\n");
3180 dev_err(dev, "ERR: sliport status 0x%x\n", sliport_status);
3182 dev_err(dev, "ERR: sliport error1 0x%x\n", sliport_err1);
3184 dev_err(dev, "ERR: sliport error2 0x%x\n", sliport_err2);
3189 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3190 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3191 ue_lo_mask = ioread32(adapter->pcicfg +
3192 PCICFG_UE_STATUS_LOW_MASK);
3193 ue_hi_mask = ioread32(adapter->pcicfg +
3194 PCICFG_UE_STATUS_HI_MASK);
3196 ue_lo = (ue_lo & ~ue_lo_mask);
3197 ue_hi = (ue_hi & ~ue_hi_mask);
3199 /* On certain platforms BE hardware can indicate spurious UEs.
3200 * Allow HW to stop working completely in case of a real UE.
3201 * Hence not setting the hw_error for UE detection. */
3204 if (ue_lo || ue_hi) {
3206 "Unrecoverable Error detected in the adapter");
3207 dev_err(dev, "Please reboot server to recover");
3208 if (skyhawk_chip(adapter))
3209 be_set_error(adapter, BE_ERROR_UE);
3211 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3213 dev_err(dev, "UE: %s bit set\n",
3214 ue_status_low_desc[i]);
3216 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3218 dev_err(dev, "UE: %s bit set\n",
3219 ue_status_hi_desc[i]);
3225 static void be_msix_disable(struct be_adapter *adapter)
3227 if (msix_enabled(adapter)) {
3228 pci_disable_msix(adapter->pdev);
3229 adapter->num_msix_vec = 0;
3230 adapter->num_msix_roce_vec = 0;
3234 static int be_msix_enable(struct be_adapter *adapter)
3237 struct device *dev = &adapter->pdev->dev;
3239 /* If RoCE is supported, program the max number of NIC vectors that
3240 * may be configured via set-channels, along with vectors needed for
3241 * RoCE. Else, just program the number we'll use initially. */
3243 if (be_roce_supported(adapter))
3244 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3245 2 * num_online_cpus());
3247 num_vec = adapter->cfg_num_qs;
3249 for (i = 0; i < num_vec; i++)
3250 adapter->msix_entries[i].entry = i;
3252 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3253 MIN_MSIX_VECTORS, num_vec);
3257 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3258 adapter->num_msix_roce_vec = num_vec / 2;
3259 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3260 adapter->num_msix_roce_vec);
3263 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3265 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3266 adapter->num_msix_vec);
3270 dev_warn(dev, "MSIx enable failed\n");
3272 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3273 if (be_virtfn(adapter))
3278 static inline int be_msix_vec_get(struct be_adapter *adapter,
3279 struct be_eq_obj *eqo)
3281 return adapter->msix_entries[eqo->msix_idx].vector;
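/* Worked example (editor's addition, illustrative numbers): on an 8-CPU
 * host with RoCE support and be_max_eqs() == 8, be_msix_enable() asks
 * for min(2 * 8, 2 * 8) == 16 vectors; if all are granted, half go to
 * RoCE (num_msix_roce_vec = 8) and the NIC keeps the other 8.
 */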
3284 static int be_msix_register(struct be_adapter *adapter)
3286 struct net_device *netdev = adapter->netdev;
3287 struct be_eq_obj *eqo;
3290 for_all_evt_queues(adapter, eqo, i) {
3291 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3292 vec = be_msix_vec_get(adapter, eqo);
3293 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3297 irq_set_affinity_hint(vec, eqo->affinity_mask);
3302 for (i--; i >= 0; i--) {
3303 eqo = &adapter->eq_obj[i];
3304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3306 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3308 be_msix_disable(adapter);
3312 static int be_irq_register(struct be_adapter *adapter)
3314 struct net_device *netdev = adapter->netdev;
3317 if (msix_enabled(adapter)) {
3318 status = be_msix_register(adapter);
3321 /* INTx is not supported for VF */
3322 if (be_virtfn(adapter))
3326 /* INTx: only the first EQ is used */
3327 netdev->irq = adapter->pdev->irq;
3328 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3329 &adapter->eq_obj[0]);
3331 dev_err(&adapter->pdev->dev,
3332 "INTx request IRQ failed - err %d\n", status);
3336 adapter->isr_registered = true;
3340 static void be_irq_unregister(struct be_adapter *adapter)
3342 struct net_device *netdev = adapter->netdev;
3343 struct be_eq_obj *eqo;
3346 if (!adapter->isr_registered)
3350 if (!msix_enabled(adapter)) {
3351 free_irq(netdev->irq, &adapter->eq_obj[0]);
3356 for_all_evt_queues(adapter, eqo, i) {
3357 vec = be_msix_vec_get(adapter, eqo);
3358 irq_set_affinity_hint(vec, NULL);
3363 adapter->isr_registered = false;
3366 static void be_rx_qs_destroy(struct be_adapter *adapter)
3368 struct be_queue_info *q;
3369 struct be_rx_obj *rxo;
3372 for_all_rx_queues(adapter, rxo, i) {
3375 /* If RXQs are destroyed while in an "out of buffer"
3376 * state, there is a possibility of an HW stall on
3377 * Lancer. So, post 64 buffers to each queue to relieve
3378 * the "out of buffer" condition.
3379 * Make sure there's space in the RXQ before posting. */
3381 if (lancer_chip(adapter)) {
3382 be_rx_cq_clean(rxo);
3383 if (atomic_read(&q->used) == 0)
3384 be_post_rx_frags(rxo, GFP_KERNEL,
3388 be_cmd_rxq_destroy(adapter, q);
3389 be_rx_cq_clean(rxo);
3392 be_queue_free(adapter, q);
3396 static void be_disable_if_filters(struct be_adapter *adapter)
3398 be_cmd_pmac_del(adapter, adapter->if_handle,
3399 adapter->pmac_id[0], 0);
3401 be_clear_uc_list(adapter);
3403 /* The IFACE flags are enabled in the open path and cleared
3404 * in the close path. When a VF gets detached from the host and
3405 * assigned to a VM the following happens:
3406 * - VF's IFACE flags get cleared in the detach path
3407 * - IFACE create is issued by the VF in the attach path
3408 * Due to a bug in the BE3/Skyhawk-R FW
3409 * (Lancer FW doesn't have the bug), the IFACE capability flags
3410 * specified along with the IFACE create cmd issued by a VF are not
3411 * honoured by FW. As a consequence, if a *new* driver
3412 * (that enables/disables IFACE flags in open/close)
3413 * is loaded in the host and an *old* driver is used by a VM/VF,
3414 * the IFACE gets created *without* the needed flags.
3415 * To avoid this, disable RX-filter flags only for Lancer. */
3417 if (lancer_chip(adapter)) {
3418 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3419 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3423 static int be_close(struct net_device *netdev)
3425 struct be_adapter *adapter = netdev_priv(netdev);
3426 struct be_eq_obj *eqo;
3429 /* This protection is needed as be_close() may be called even when the
3430 * adapter is in a cleared state (after EEH perm failure) */
3432 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3435 be_disable_if_filters(adapter);
3437 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3438 for_all_evt_queues(adapter, eqo, i) {
3439 napi_disable(&eqo->napi);
3440 be_disable_busy_poll(eqo);
3442 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3445 be_async_mcc_disable(adapter);
3447 /* Wait for all pending tx completions to arrive so that
3448 * all tx skbs are freed. */
3450 netif_tx_disable(netdev);
3451 be_tx_compl_clean(adapter);
3453 be_rx_qs_destroy(adapter);
3455 for_all_evt_queues(adapter, eqo, i) {
3456 if (msix_enabled(adapter))
3457 synchronize_irq(be_msix_vec_get(adapter, eqo));
3459 synchronize_irq(netdev->irq);
3463 be_irq_unregister(adapter);
3468 static int be_rx_qs_create(struct be_adapter *adapter)
3470 struct rss_info *rss = &adapter->rss_info;
3471 u8 rss_key[RSS_HASH_KEY_LEN];
3472 struct be_rx_obj *rxo;
3475 for_all_rx_queues(adapter, rxo, i) {
3476 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3477 sizeof(struct be_eth_rx_d));
3482 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3483 rxo = default_rxo(adapter);
3484 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3485 rx_frag_size, adapter->if_handle,
3486 false, &rxo->rss_id);
3491 for_all_rss_queues(adapter, rxo, i) {
3492 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3493 rx_frag_size, adapter->if_handle,
3494 true, &rxo->rss_id);
3499 if (be_multi_rxq(adapter)) {
3500 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3501 for_all_rss_queues(adapter, rxo, i) {
3502 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3504 rss->rsstable[j + i] = rxo->rss_id;
3505 rss->rss_queue[j + i] = i;
3508 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3509 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3511 if (!BEx_chip(adapter))
3512 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3513 RSS_ENABLE_UDP_IPV6;
3515 /* Disable RSS, if only default RX Q is created */
3516 rss->rss_flags = RSS_ENABLE_NONE;
3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3521 RSS_INDIR_TABLE_LEN, rss_key);
3523 rss->rss_flags = RSS_ENABLE_NONE;
3527 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3529 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3530 * which is a queue empty condition. */
3532 for_all_rx_queues(adapter, rxo, i)
3533 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
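/* Illustrative walk-through (editor's addition): with 4 RSS rings and
 * RSS_INDIR_TABLE_LEN == 128, the nested loops above fill rsstable[]
 * with the repeating pattern rss_id0, rss_id1, rss_id2, rss_id3, ...
 * so every ring owns every 4th slot of the indirection table.
 */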
3538 static int be_enable_if_filters(struct be_adapter *adapter)
3542 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3546 /* For BE3 VFs, the PF programs the initial MAC address */
3547 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3548 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3550 &adapter->pmac_id[0], 0);
3555 if (adapter->vlans_added)
3556 be_vid_config(adapter);
3558 be_set_rx_mode(adapter->netdev);
3563 static int be_open(struct net_device *netdev)
3565 struct be_adapter *adapter = netdev_priv(netdev);
3566 struct be_eq_obj *eqo;
3567 struct be_rx_obj *rxo;
3568 struct be_tx_obj *txo;
3572 status = be_rx_qs_create(adapter);
3576 status = be_enable_if_filters(adapter);
3580 status = be_irq_register(adapter);
3584 for_all_rx_queues(adapter, rxo, i)
3585 be_cq_notify(adapter, rxo->cq.id, true, 0);
3587 for_all_tx_queues(adapter, txo, i)
3588 be_cq_notify(adapter, txo->cq.id, true, 0);
3590 be_async_mcc_enable(adapter);
3592 for_all_evt_queues(adapter, eqo, i) {
3593 napi_enable(&eqo->napi);
3594 be_enable_busy_poll(eqo);
3595 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3597 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3599 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3601 be_link_status_update(adapter, link_status);
3603 netif_tx_start_all_queues(netdev);
3604 #ifdef CONFIG_BE2NET_VXLAN
3605 if (skyhawk_chip(adapter))
3606 vxlan_get_rx_port(netdev);
3611 be_close(adapter->netdev);
3615 static int be_setup_wol(struct be_adapter *adapter, bool enable)
3617 struct device *dev = &adapter->pdev->dev;
3618 struct be_dma_mem cmd;
3624 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3625 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
3630 status = pci_write_config_dword(adapter->pdev,
3631 PCICFG_PM_CONTROL_OFFSET,
3632 PCICFG_PM_CONTROL_MASK);
3634 dev_err(dev, "Could not enable Wake-on-lan\n");
3638 ether_addr_copy(mac, adapter->netdev->dev_addr);
3641 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3642 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3643 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3645 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
3649 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3653 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3655 mac[5] = (u8)(addr & 0xFF);
3656 mac[4] = (u8)((addr >> 8) & 0xFF);
3657 mac[3] = (u8)((addr >> 16) & 0xFF);
3658 /* Use the OUI from the current MAC address */
3659 memcpy(mac, adapter->netdev->dev_addr, 3);
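/* Example with assumed values (editor's addition): for a PF MAC of
 * 00:90:fa:12:34:56 and jhash() returning 0x00abcdef, the seed VF MAC
 * becomes 00:90:fa:ab:cd:ef, i.e. the OUI is kept and the NIC-specific
 * bytes are replaced by the low 24 bits of the hash.
 */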
3663 * Generate a seed MAC address from the PF MAC Address using jhash.
3664 * MAC addresses for the VFs are assigned incrementally starting from the seed.
3665 * These addresses are programmed in the ASIC by the PF and the VF driver
3666 * queries for the MAC address during its probe. */
3668 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3673 struct be_vf_cfg *vf_cfg;
3675 be_vf_eth_addr_generate(adapter, mac);
3677 for_all_vfs(adapter, vf_cfg, vf) {
3678 if (BEx_chip(adapter))
3679 status = be_cmd_pmac_add(adapter, mac,
3681 &vf_cfg->pmac_id, vf + 1);
3683 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, vf + 1);
3687 dev_err(&adapter->pdev->dev,
3688 "Mac address assignment failed for VF %d\n",
3691 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3698 static int be_vfs_mac_query(struct be_adapter *adapter)
3702 struct be_vf_cfg *vf_cfg;
3704 for_all_vfs(adapter, vf_cfg, vf) {
3705 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3706 mac, vf_cfg->if_handle, false, vf + 1);
3710 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3715 static void be_vf_clear(struct be_adapter *adapter)
3717 struct be_vf_cfg *vf_cfg;
3720 if (pci_vfs_assigned(adapter->pdev)) {
3721 dev_warn(&adapter->pdev->dev,
3722 "VFs are assigned to VMs: not disabling VFs\n");
3726 pci_disable_sriov(adapter->pdev);
3728 for_all_vfs(adapter, vf_cfg, vf) {
3729 if (BEx_chip(adapter))
3730 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3731 vf_cfg->pmac_id, vf + 1);
3733 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle, vf + 1);
3736 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3739 kfree(adapter->vf_cfg);
3740 adapter->num_vfs = 0;
3741 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3744 static void be_clear_queues(struct be_adapter *adapter)
3746 be_mcc_queues_destroy(adapter);
3747 be_rx_cqs_destroy(adapter);
3748 be_tx_queues_destroy(adapter);
3749 be_evt_queues_destroy(adapter);
3752 static void be_cancel_worker(struct be_adapter *adapter)
3754 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3755 cancel_delayed_work_sync(&adapter->work);
3756 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3760 static void be_cancel_err_detection(struct be_adapter *adapter)
3762 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3763 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3764 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3768 #ifdef CONFIG_BE2NET_VXLAN
3769 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3771 struct net_device *netdev = adapter->netdev;
3773 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3774 be_cmd_manage_iface(adapter, adapter->if_handle,
3775 OP_CONVERT_TUNNEL_TO_NORMAL);
3777 if (adapter->vxlan_port)
3778 be_cmd_set_vxlan_port(adapter, 0);
3780 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3781 adapter->vxlan_port = 0;
3783 netdev->hw_enc_features = 0;
3784 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3785 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3789 static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3791 struct be_resources res = adapter->pool_res;
3794 /* Distribute the queue resources among the PF and its VFs.
3795 * Do not distribute queue resources in multi-channel configuration. */
3797 if (num_vfs && !be_is_mc(adapter)) {
3798 /* Divide the qpairs evenly among the VFs and the PF, capped
3799 * at VF-EQ-count. Any remainder qpairs belong to the PF. */
3801 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3802 res.max_rss_qs / (num_vfs + 1));
3804 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3805 * interfaces per port. Provide RSS on VFs, only if number
3806 * of VFs requested is less than the MAX_RSS_IFACES limit. */
3808 if (num_vfs >= MAX_RSS_IFACES)
3814 static int be_clear(struct be_adapter *adapter)
3816 struct pci_dev *pdev = adapter->pdev;
3819 be_cancel_worker(adapter);
3821 if (sriov_enabled(adapter))
3822 be_vf_clear(adapter);
3824 /* Re-configure FW to distribute resources evenly across max-supported
3825 * number of VFs, only when VFs are not already enabled. */
3827 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3828 !pci_vfs_assigned(pdev)) {
3829 num_vf_qs = be_calculate_vf_qs(adapter,
3830 pci_sriov_get_totalvfs(pdev));
3831 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3832 pci_sriov_get_totalvfs(pdev), num_vf_qs);
3836 #ifdef CONFIG_BE2NET_VXLAN
3837 be_disable_vxlan_offloads(adapter);
3839 kfree(adapter->pmac_id);
3840 adapter->pmac_id = NULL;
3842 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3844 be_clear_queues(adapter);
3846 be_msix_disable(adapter);
3847 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3851 static int be_vfs_if_create(struct be_adapter *adapter)
3853 struct be_resources res = {0};
3854 u32 cap_flags, en_flags, vf;
3855 struct be_vf_cfg *vf_cfg;
3858 /* If a FW profile exists, then cap_flags are updated */
3859 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3860 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3862 for_all_vfs(adapter, vf_cfg, vf) {
3863 if (!BE3_chip(adapter)) {
3864 status = be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, vf + 1);
3868 cap_flags = res.if_cap_flags;
3869 /* Prevent VFs from enabling VLAN promiscuous mode */
3872 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3876 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3877 BE_IF_FLAGS_BROADCAST |
3878 BE_IF_FLAGS_MULTICAST |
3879 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3880 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3881 &vf_cfg->if_handle, vf + 1);
3889 static int be_vf_setup_init(struct be_adapter *adapter)
3891 struct be_vf_cfg *vf_cfg;
3894 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3896 if (!adapter->vf_cfg)
3899 for_all_vfs(adapter, vf_cfg, vf) {
3900 vf_cfg->if_handle = -1;
3901 vf_cfg->pmac_id = -1;
3906 static int be_vf_setup(struct be_adapter *adapter)
3908 struct device *dev = &adapter->pdev->dev;
3909 struct be_vf_cfg *vf_cfg;
3910 int status, old_vfs, vf;
3913 old_vfs = pci_num_vf(adapter->pdev);
3915 status = be_vf_setup_init(adapter);
3920 for_all_vfs(adapter, vf_cfg, vf) {
3921 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3926 status = be_vfs_mac_query(adapter);
3930 status = be_vfs_if_create(adapter);
3934 status = be_vf_eth_addr_config(adapter);
3939 for_all_vfs(adapter, vf_cfg, vf) {
3940 /* Allow VFs to program MAC/VLAN filters */
3941 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, vf + 1);
3943 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3944 status = be_cmd_set_fn_privileges(adapter,
3945 vf_cfg->privileges | BE_PRIV_FILTMGMT, vf + 1);
3949 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3950 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3955 /* Allow full available bandwidth */
3957 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3959 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3960 vf_cfg->if_handle, NULL, &spoofchk);
3963 vf_cfg->spoofchk = spoofchk;
3966 be_cmd_enable_vf(adapter, vf + 1);
3967 be_cmd_set_logical_link_config(adapter,
3968 IFLA_VF_LINK_STATE_AUTO, vf + 1);
3974 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3976 dev_err(dev, "SRIOV enable failed\n");
3977 adapter->num_vfs = 0;
3982 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3985 dev_err(dev, "VF setup failed\n");
3986 be_vf_clear(adapter);
3990 /* Converting function_mode bits on BE3 to SH mc_type enums */
3992 static u8 be_convert_mc_type(u32 function_mode)
3994 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3996 else if (function_mode & QNQ_MODE)
3998 else if (function_mode & VNIC_MODE)
4000 else if (function_mode & UMC_ENABLED)
4006 /* On BE2/BE3, FW does not suggest the supported limits */
4007 static void BEx_get_resources(struct be_adapter *adapter,
4008 struct be_resources *res)
4010 bool use_sriov = adapter->num_vfs ? 1 : 0;
4012 if (be_physfn(adapter))
4013 res->max_uc_mac = BE_UC_PMAC_COUNT;
4015 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4017 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4019 if (be_is_mc(adapter)) {
4020 /* Assuming that there are 4 channels per port,
4021 * when multi-channel is enabled. */
4023 if (be_is_qnq_mode(adapter))
4024 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4026 /* In a non-qnq multichannel mode, the pvid
4027 * takes up one vlan entry. */
4029 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4031 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4034 res->max_mcast_mac = BE_MAX_MC;
4036 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4037 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4038 * *only* if it is RSS-capable. */
4040 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4041 be_virtfn(adapter) ||
4042 (be_is_mc(adapter) &&
4043 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4045 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4046 struct be_resources super_nic_res = {0};
4048 /* On a SuperNIC profile, the driver needs to use the
4049 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits. */
4051 be_cmd_get_profile_config(adapter, &super_nic_res,
4052 RESOURCE_LIMITS, 0);
4053 /* Some old versions of BE3 FW don't report max_tx_qs value */
4054 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4056 res->max_tx_qs = BE3_MAX_TX_QS;
4059 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4060 !use_sriov && be_physfn(adapter))
4061 res->max_rss_qs = (adapter->be3_native) ?
4062 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4063 res->max_rx_qs = res->max_rss_qs + 1;
4065 if (be_physfn(adapter))
4066 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4067 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4069 res->max_evt_qs = 1;
4071 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4072 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4073 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4074 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4077 static void be_setup_init(struct be_adapter *adapter)
4079 adapter->vlan_prio_bmap = 0xff;
4080 adapter->phy.link_speed = -1;
4081 adapter->if_handle = -1;
4082 adapter->be3_native = false;
4083 adapter->if_flags = 0;
4084 if (be_physfn(adapter))
4085 adapter->cmd_privileges = MAX_PRIVILEGES;
4087 adapter->cmd_privileges = MIN_PRIVILEGES;
4090 static int be_get_sriov_config(struct be_adapter *adapter)
4092 struct be_resources res = {0};
4093 int max_vfs, old_vfs;
4095 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
4097 /* Some old versions of BE3 FW don't report max_vfs value */
4098 if (BE3_chip(adapter) && !res.max_vfs) {
4099 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4100 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4103 adapter->pool_res = res;
4105 /* If during previous unload of the driver, the VFs were not disabled,
4106 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4107 * Instead use the TotalVFs value stored in the pci-dev struct. */
4109 old_vfs = pci_num_vf(adapter->pdev);
4111 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4114 adapter->pool_res.max_vfs =
4115 pci_sriov_get_totalvfs(adapter->pdev);
4116 adapter->num_vfs = old_vfs;
4122 static void be_alloc_sriov_res(struct be_adapter *adapter)
4124 int old_vfs = pci_num_vf(adapter->pdev);
4128 be_get_sriov_config(adapter);
4131 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4133 /* When the HW is in SRIOV capable configuration, the PF-pool
4134 * resources are given to PF during driver load, if there are no
4135 * old VFs. This facility is not available in BE3 FW.
4136 * Also, this is done by FW in the Lancer chip. */
4138 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4139 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4140 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, num_vf_qs);
4143 dev_err(&adapter->pdev->dev,
4144 "Failed to optimize SRIOV resources\n");
4148 static int be_get_resources(struct be_adapter *adapter)
4150 struct device *dev = &adapter->pdev->dev;
4151 struct be_resources res = {0};
4154 if (BEx_chip(adapter)) {
4155 BEx_get_resources(adapter, &res);
4159 /* For Lancer, SH etc read per-function resource limits from FW.
4160 * GET_FUNC_CONFIG returns per function guaranteed limits.
4161 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits. */
4163 if (!BEx_chip(adapter)) {
4164 status = be_cmd_get_func_config(adapter, &res);
4168 /* If a default RXQ must be created, we'll use up one RSS queue */
4169 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4170 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4171 res.max_rss_qs -= 1;
4173 /* If RoCE may be enabled, stash away half the EQs for RoCE */
4174 if (be_roce_supported(adapter))
4175 res.max_evt_qs /= 2;
4179 /* If FW supports RSS default queue, then skip creating non-RSS
4180 * queue for non-IP traffic. */
4182 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4183 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4185 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4186 be_max_txqs(adapter), be_max_rxqs(adapter),
4187 be_max_rss(adapter), be_max_eqs(adapter),
4188 be_max_vfs(adapter));
4189 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4190 be_max_uc(adapter), be_max_mc(adapter),
4191 be_max_vlans(adapter));
4193 /* Sanitize cfg_num_qs based on HW and platform limits */
4194 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4195 be_max_qs(adapter));
4199 static int be_get_config(struct be_adapter *adapter)
4204 status = be_cmd_get_cntl_attributes(adapter);
4208 status = be_cmd_query_fw_cfg(adapter);
4212 if (!lancer_chip(adapter) && be_physfn(adapter))
4213 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4215 if (BEx_chip(adapter)) {
4216 level = be_cmd_get_fw_log_level(adapter);
4217 adapter->msg_enable =
4218 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4221 be_cmd_get_acpi_wol_cap(adapter);
4223 be_cmd_query_port_name(adapter);
4225 if (be_physfn(adapter)) {
4226 status = be_cmd_get_active_profile(adapter, &profile_id);
4228 dev_info(&adapter->pdev->dev,
4229 "Using profile 0x%x\n", profile_id);
4232 status = be_get_resources(adapter);
4236 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4237 sizeof(*adapter->pmac_id), GFP_KERNEL);
4238 if (!adapter->pmac_id)
4244 static int be_mac_setup(struct be_adapter *adapter)
4249 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4250 status = be_cmd_get_perm_mac(adapter, mac);
4254 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4255 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4261 static void be_schedule_worker(struct be_adapter *adapter)
4263 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4264 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4267 static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
4269 schedule_delayed_work(&adapter->be_err_detection_work,
4270 msecs_to_jiffies(delay));
4271 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4274 static int be_setup_queues(struct be_adapter *adapter)
4276 struct net_device *netdev = adapter->netdev;
4279 status = be_evt_queues_create(adapter);
4283 status = be_tx_qs_create(adapter);
4287 status = be_rx_cqs_create(adapter);
4291 status = be_mcc_queues_create(adapter);
4295 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4299 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4305 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4309 int be_update_queues(struct be_adapter *adapter)
4311 struct net_device *netdev = adapter->netdev;
4314 if (netif_running(netdev))
4317 be_cancel_worker(adapter);
4319 /* If any vectors have been shared with RoCE we cannot re-program the MSIx table */
4322 if (!adapter->num_msix_roce_vec)
4323 be_msix_disable(adapter);
4325 be_clear_queues(adapter);
4327 if (!msix_enabled(adapter)) {
4328 status = be_msix_enable(adapter);
4333 status = be_setup_queues(adapter);
4337 be_schedule_worker(adapter);
4339 if (netif_running(netdev))
4340 status = be_open(netdev);
4345 static inline int fw_major_num(const char *fw_ver)
4347 int fw_major = 0, i;
4349 i = sscanf(fw_ver, "%d.", &fw_major);
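/* Usage sketch (editor's addition): fw_major_num("4.9.311.0") scans the
 * leading integer and returns 4; be_setup() uses this to warn when BE2
 * firmware older than major version 4 is flashed on the card.
 */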
4356 /* If any VFs are already enabled don't FLR the PF */
4357 static bool be_reset_required(struct be_adapter *adapter)
4359 return !pci_num_vf(adapter->pdev);
4362 /* Wait for the FW to be ready and perform the required initialization */
4363 static int be_func_init(struct be_adapter *adapter)
4367 status = be_fw_wait_ready(adapter);
4371 if (be_reset_required(adapter)) {
4372 status = be_cmd_reset_function(adapter);
4376 /* Wait for interrupts to quiesce after an FLR */
4379 /* We can clear all errors when function reset succeeds */
4380 be_clear_error(adapter, BE_CLEAR_ALL);
4383 /* Tell FW we're ready to fire cmds */
4384 status = be_cmd_fw_init(adapter);
4388 /* Allow interrupts for other ULPs running on NIC function */
4389 be_intr_set(adapter, true);
4394 static int be_setup(struct be_adapter *adapter)
4396 struct device *dev = &adapter->pdev->dev;
4400 status = be_func_init(adapter);
4404 be_setup_init(adapter);
4406 if (!lancer_chip(adapter))
4407 be_cmd_req_native_mode(adapter);
4409 /* invoke this cmd first to get pf_num and vf_num which are needed
4410 * for issuing profile related cmds. */
4412 if (!BEx_chip(adapter)) {
4413 status = be_cmd_get_func_config(adapter, NULL);
4418 if (!BE2_chip(adapter) && be_physfn(adapter))
4419 be_alloc_sriov_res(adapter);
4421 status = be_get_config(adapter);
4425 status = be_msix_enable(adapter);
4429 /* will enable all the needed filter flags in be_open() */
4430 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4431 en_flags &= be_if_cap_flags(adapter);
4432 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4433 &adapter->if_handle, 0);
4437 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4439 status = be_setup_queues(adapter);
4444 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4446 status = be_mac_setup(adapter);
4450 be_cmd_get_fw_ver(adapter);
4451 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4453 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4454 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
4456 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4459 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4462 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4465 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4466 adapter->tx_fc, adapter->rx_fc);
4468 if (be_physfn(adapter))
4469 be_cmd_set_logical_link_config(adapter,
4470 IFLA_VF_LINK_STATE_AUTO, 0);
4472 if (adapter->num_vfs)
4473 be_vf_setup(adapter);
4475 status = be_cmd_get_phy_info(adapter);
4476 if (!status && be_pause_supported(adapter))
4477 adapter->phy.fc_autoneg = 1;
4479 be_schedule_worker(adapter);
4480 adapter->flags |= BE_FLAGS_SETUP_DONE;
4487 #ifdef CONFIG_NET_POLL_CONTROLLER
4488 static void be_netpoll(struct net_device *netdev)
4490 struct be_adapter *adapter = netdev_priv(netdev);
4491 struct be_eq_obj *eqo;
4494 for_all_evt_queues(adapter, eqo, i) {
4495 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4496 napi_schedule(&eqo->napi);
4501 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4503 const struct firmware *fw;
4506 if (!netif_running(adapter->netdev)) {
4507 dev_err(&adapter->pdev->dev,
4508 "Firmware load not allowed (interface is down)\n");
4512 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4516 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4518 if (lancer_chip(adapter))
4519 status = lancer_fw_download(adapter, fw);
4521 status = be_fw_download(adapter, fw);
4524 be_cmd_get_fw_ver(adapter);
4527 release_firmware(fw);
4531 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4534 struct be_adapter *adapter = netdev_priv(dev);
4535 struct nlattr *attr, *br_spec;
4540 if (!sriov_enabled(adapter))
4543 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4547 nla_for_each_nested(attr, br_spec, rem) {
4548 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4551 if (nla_len(attr) < sizeof(mode))
4554 mode = nla_get_u16(attr);
4555 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4558 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4561 status = be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4563 mode == BRIDGE_MODE_VEPA ?
4564 PORT_FWD_TYPE_VEPA :
4565 PORT_FWD_TYPE_VEB, 0);
4569 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4570 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4575 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4576 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
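/* Usage sketch (editor's addition, assuming the iproute2 bridge tool):
 *
 *     bridge link set dev eth0 hwmode vepa
 *
 * reaches this handler via ndo_bridge_setlink and programs the port
 * forwarding type to PORT_FWD_TYPE_VEPA.
 */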
4581 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4582 struct net_device *dev, u32 filter_mask,
4585 struct be_adapter *adapter = netdev_priv(dev);
4589 /* BE and Lancer chips support VEB mode only */
4590 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4591 hsw_mode = PORT_FWD_TYPE_VEB;
4593 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4594 adapter->if_handle, &hsw_mode,
4599 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4603 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4604 hsw_mode == PORT_FWD_TYPE_VEPA ?
4605 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4606 0, 0, nlflags, filter_mask, NULL);
4609 #ifdef CONFIG_BE2NET_VXLAN
4610 /* VxLAN offload Notes:
4612 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4613 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4614 * is expected to work across all types of IP tunnels once exported. Skyhawk
4615 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4616 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4617 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4618 * those other tunnels are unexported on the fly through ndo_features_check().
4620 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4621 * adds more than one port, disable offloads and don't re-enable them again
4622 * until after all the tunnels are removed.
4623 */
4624 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4625 __be16 port)
4626 {
4627 struct be_adapter *adapter = netdev_priv(netdev);
4628 struct device *dev = &adapter->pdev->dev;
4629 int status;
4631 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4632 return;
4634 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4635 adapter->vxlan_port_aliases++;
4636 return;
4637 }
4639 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4640 dev_info(dev,
4641 "Only one UDP port supported for VxLAN offloads\n");
4642 dev_info(dev, "Disabling VxLAN offloads\n");
4643 adapter->vxlan_port_count++;
4644 goto err;
4645 }
4647 if (adapter->vxlan_port_count++ >= 1)
4648 return;
4650 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4651 OP_CONVERT_NORMAL_TO_TUNNEL);
4652 if (status) {
4653 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4654 goto err;
4655 }
4657 status = be_cmd_set_vxlan_port(adapter, port);
4658 if (status) {
4659 dev_warn(dev, "Failed to add VxLAN port\n");
4660 goto err;
4661 }
4662 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4663 adapter->vxlan_port = port;
4665 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4666 NETIF_F_TSO | NETIF_F_TSO6 |
4667 NETIF_F_GSO_UDP_TUNNEL;
4668 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4669 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
4671 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4675 be_disable_vxlan_offloads(adapter);
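/* Teardown mirrors be_add_vxlan_port(): alias references taken for
 * duplicate adds of the same port are dropped first; offloads are
 * disabled only when the offloaded port itself is removed.
 */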
4678 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4679 __be16 port)
4680 {
4681 struct be_adapter *adapter = netdev_priv(netdev);
4683 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4684 return;
4686 if (adapter->vxlan_port != port)
4687 goto done;
4689 if (adapter->vxlan_port_aliases) {
4690 adapter->vxlan_port_aliases--;
4691 return;
4692 }
4694 be_disable_vxlan_offloads(adapter);
4696 dev_info(&adapter->pdev->dev,
4697 "Disabled VxLAN offloads for UDP port %d\n",
4700 adapter->vxlan_port_count--;
4703 static netdev_features_t be_features_check(struct sk_buff *skb,
4704 struct net_device *dev,
4705 netdev_features_t features)
4706 {
4707 struct be_adapter *adapter = netdev_priv(dev);
4708 u8 l4_hdr = 0;
4710 /* The code below restricts offload features for some tunneled packets.
4711 * Offload features for normal (non tunnel) packets are unchanged.
4712 */
4713 if (!skb->encapsulation ||
4714 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4715 return features;
4717 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4718 * should disable tunnel offload features if it's not a VxLAN packet,
4719 * as tunnel offloads have been enabled only for VxLAN. This is done to
4720 * allow other tunneled traffic like GRE to work fine while VxLAN
4721 * offloads are configured in Skyhawk-R.
4722 */
4723 switch (vlan_get_protocol(skb)) {
4724 case htons(ETH_P_IP):
4725 l4_hdr = ip_hdr(skb)->protocol;
4726 break;
4727 case htons(ETH_P_IPV6):
4728 l4_hdr = ipv6_hdr(skb)->nexthdr;
4729 break;
4730 default:
4731 return features;
4732 }
4734 if (l4_hdr != IPPROTO_UDP ||
4735 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4736 skb->inner_protocol != htons(ETH_P_TEB) ||
4737 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4738 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4739 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4741 return features;
4742 }
4743 #endif
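/* The exported phys_port_id is the 1-based HBA port number followed by
 * the controller serial-number words copied in reverse order, which
 * should yield an ID unique to each physical port.
 */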
4745 static int be_get_phys_port_id(struct net_device *dev,
4746 struct netdev_phys_item_id *ppid)
4747 {
4748 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4749 struct be_adapter *adapter = netdev_priv(dev);
4750 u8 *id;
4752 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4753 return -ENOSPC;
4755 ppid->id[0] = adapter->hba_port_num + 1;
4756 id = &ppid->id[1];
4757 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4758 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4759 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4761 ppid->id_len = id_len;
4763 return 0;
4764 }
4766 static const struct net_device_ops be_netdev_ops = {
4767 .ndo_open = be_open,
4768 .ndo_stop = be_close,
4769 .ndo_start_xmit = be_xmit,
4770 .ndo_set_rx_mode = be_set_rx_mode,
4771 .ndo_set_mac_address = be_mac_addr_set,
4772 .ndo_change_mtu = be_change_mtu,
4773 .ndo_get_stats64 = be_get_stats64,
4774 .ndo_validate_addr = eth_validate_addr,
4775 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4776 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4777 .ndo_set_vf_mac = be_set_vf_mac,
4778 .ndo_set_vf_vlan = be_set_vf_vlan,
4779 .ndo_set_vf_rate = be_set_vf_tx_rate,
4780 .ndo_get_vf_config = be_get_vf_config,
4781 .ndo_set_vf_link_state = be_set_vf_link_state,
4782 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
4783 #ifdef CONFIG_NET_POLL_CONTROLLER
4784 .ndo_poll_controller = be_netpoll,
4785 #endif
4786 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4787 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4788 #ifdef CONFIG_NET_RX_BUSY_POLL
4789 .ndo_busy_poll = be_busy_poll,
4790 #endif
4791 #ifdef CONFIG_BE2NET_VXLAN
4792 .ndo_add_vxlan_port = be_add_vxlan_port,
4793 .ndo_del_vxlan_port = be_del_vxlan_port,
4794 .ndo_features_check = be_features_check,
4795 #endif
4796 .ndo_get_phys_port_id = be_get_phys_port_id,
4797 };
4799 static void be_netdev_init(struct net_device *netdev)
4800 {
4801 struct be_adapter *adapter = netdev_priv(netdev);
4803 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4804 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4805 NETIF_F_HW_VLAN_CTAG_TX;
4806 if (be_multi_rxq(adapter))
4807 netdev->hw_features |= NETIF_F_RXHASH;
4809 netdev->features |= netdev->hw_features |
4810 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4812 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4813 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4815 netdev->priv_flags |= IFF_UNICAST_FLT;
4817 netdev->flags |= IFF_MULTICAST;
4819 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4821 netdev->netdev_ops = &be_netdev_ops;
4823 netdev->ethtool_ops = &be_ethtool_ops;
4824 }
4826 static void be_cleanup(struct be_adapter *adapter)
4827 {
4828 struct net_device *netdev = adapter->netdev;
4830 rtnl_lock();
4831 netif_device_detach(netdev);
4832 if (netif_running(netdev))
4833 be_close(netdev);
4834 rtnl_unlock();
4836 be_clear(adapter);
4837 }
4839 static int be_resume(struct be_adapter *adapter)
4840 {
4841 struct net_device *netdev = adapter->netdev;
4842 int status;
4844 status = be_setup(adapter);
4845 if (status)
4846 return status;
4848 if (netif_running(netdev)) {
4849 status = be_open(netdev);
4850 if (status)
4851 return status;
4852 }
4854 netif_device_attach(netdev);
4856 return 0;
4857 }
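/* Recovery path: wait for the firmware to become ready again, tear the
 * adapter down with be_cleanup(), then rebuild and reattach it through
 * be_resume().
 */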
4859 static int be_err_recover(struct be_adapter *adapter)
4860 {
4861 int status;
4863 /* Error recovery is supported only on Lancer as of now */
4864 if (!lancer_chip(adapter))
4865 return -EIO;
4867 /* Wait for adapter to reach quiescent state before
4868 * destroying queues
4869 */
4870 status = be_fw_wait_ready(adapter);
4871 if (status)
4872 goto err;
4874 be_cleanup(adapter);
4876 status = be_resume(adapter);
4877 if (status)
4878 goto err;
4880 return 0;
4881 err:
4882 return status;
4883 }
4885 static void be_err_detection_task(struct work_struct *work)
4886 {
4887 struct be_adapter *adapter =
4888 container_of(work, struct be_adapter,
4889 be_err_detection_work.work);
4890 struct device *dev = &adapter->pdev->dev;
4891 int recovery_status;
4892 int delay = ERR_DETECTION_DELAY;
4894 be_detect_error(adapter);
4896 if (be_check_error(adapter, BE_ERROR_HW))
4897 recovery_status = be_err_recover(adapter);
4898 else
4899 goto reschedule_task;
4901 if (!recovery_status) {
4902 adapter->recovery_retries = 0;
4903 dev_info(dev, "Adapter recovery successful\n");
4904 goto reschedule_task;
4905 } else if (be_virtfn(adapter)) {
4906 /* For VFs, check if PF has allocated resources
4907 * every second.
4908 */
4909 dev_err(dev, "Re-trying adapter recovery\n");
4910 goto reschedule_task;
4911 } else if (adapter->recovery_retries++ <
4912 MAX_ERR_RECOVERY_RETRY_COUNT) {
4913 /* In case of another error during recovery, it takes 30 sec
4914 * for adapter to come out of error. Retry error recovery after
4915 * this time interval.
4916 */
4917 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
4918 delay = ERR_RECOVERY_RETRY_DELAY;
4919 goto reschedule_task;
4920 } else {
4921 dev_err(dev, "Adapter recovery failed\n");
4922 }
4924 return;
4925 reschedule_task:
4926 be_schedule_err_detection(adapter, delay);
4927 }
4929 static void be_log_sfp_info(struct be_adapter *adapter)
4930 {
4931 int status;
4933 status = be_cmd_query_sfp_info(adapter);
4934 if (!status) {
4935 dev_err(&adapter->pdev->dev,
4936 "Unqualified SFP+ detected on %c from %s part no: %s",
4937 adapter->port_name, adapter->phy.vendor_name,
4938 adapter->phy.vendor_pn);
4939 }
4940 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
4941 }
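/* Periodic worker, rescheduled every second: reaps MCC completions
 * while the interface is down, refreshes stats and die temperature,
 * replenishes starved RX queues, updates EQ delay and logs any
 * incompatible SFP that the firmware flagged.
 */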
4943 static void be_worker(struct work_struct *work)
4944 {
4945 struct be_adapter *adapter =
4946 container_of(work, struct be_adapter, work.work);
4947 struct be_rx_obj *rxo;
4948 int i;
4950 /* when interrupts are not yet enabled, just reap any pending
4951 * mcc completions
4952 */
4953 if (!netif_running(adapter->netdev)) {
4954 local_bh_disable();
4955 be_process_mcc(adapter);
4956 local_bh_enable();
4957 goto reschedule;
4958 }
4960 if (!adapter->stats_cmd_sent) {
4961 if (lancer_chip(adapter))
4962 lancer_cmd_get_pport_stats(adapter,
4963 &adapter->stats_cmd);
4964 else
4965 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4966 }
4968 if (be_physfn(adapter) &&
4969 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4970 be_cmd_get_die_temperature(adapter);
4972 for_all_rx_queues(adapter, rxo, i) {
4973 /* Replenish RX-queues starved due to memory
4974 * allocation failures.
4975 */
4976 if (rxo->rx_post_starved)
4977 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
4978 }
4980 /* EQ-delay update for Skyhawk is done while notifying EQ */
4981 if (!skyhawk_chip(adapter))
4982 be_eqd_update(adapter, false);
4984 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
4985 be_log_sfp_info(adapter);
4987 reschedule:
4988 adapter->work_counter++;
4989 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4990 }
4992 static void be_unmap_pci_bars(struct be_adapter *adapter)
4993 {
4994 if (adapter->csr)
4995 pci_iounmap(adapter->pdev, adapter->csr);
4996 if (adapter->db)
4997 pci_iounmap(adapter->pdev, adapter->db);
4998 }
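/* Doorbell BAR selection: BAR 0 on Lancer and on virtual functions,
 * BAR 4 otherwise (BEx/Skyhawk physical functions).
 */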
5000 static int db_bar(struct be_adapter *adapter)
5001 {
5002 if (lancer_chip(adapter) || be_virtfn(adapter))
5003 return 0;
5004 else
5005 return 4;
5006 }
5008 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5009 {
5010 if (skyhawk_chip(adapter)) {
5011 adapter->roce_db.size = 4096;
5012 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5013 db_bar(adapter));
5014 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5015 db_bar(adapter));
5016 }
5017 return 0;
5018 }
5020 static int be_map_pci_bars(struct be_adapter *adapter)
5021 {
5022 struct pci_dev *pdev = adapter->pdev;
5023 u8 __iomem *addr;
5024 u32 sli_intf;
5026 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5027 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5028 SLI_INTF_FAMILY_SHIFT;
5029 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5031 if (BEx_chip(adapter) && be_physfn(adapter)) {
5032 adapter->csr = pci_iomap(pdev, 2, 0);
5033 if (!adapter->csr)
5034 return -ENOMEM;
5035 }
5037 addr = pci_iomap(pdev, db_bar(adapter), 0);
5038 if (!addr)
5039 goto pci_map_err;
5040 adapter->db = addr;
5042 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5043 if (be_physfn(adapter)) {
5044 /* PCICFG is the 2nd BAR in BE2 */
5045 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5046 if (!addr)
5047 goto pci_map_err;
5048 adapter->pcicfg = addr;
5049 } else {
5050 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5051 }
5052 }
5054 be_roce_map_pci_bars(adapter);
5055 return 0;
5057 pci_map_err:
5058 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5059 be_unmap_pci_bars(adapter);
5060 return -ENOMEM;
5061 }
5063 static void be_drv_cleanup(struct be_adapter *adapter)
5064 {
5065 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5066 struct device *dev = &adapter->pdev->dev;
5068 if (mem->va)
5069 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5071 mem = &adapter->rx_filter;
5072 if (mem->va)
5073 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5075 mem = &adapter->stats_cmd;
5076 if (mem->va)
5077 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5078 }
5080 /* Allocate and initialize various fields in be_adapter struct */
5081 static int be_drv_init(struct be_adapter *adapter)
5082 {
5083 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5084 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5085 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5086 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5087 struct device *dev = &adapter->pdev->dev;
5088 int status = 0;
5090 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5091 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5092 &mbox_mem_alloc->dma,
5093 GFP_KERNEL);
5094 if (!mbox_mem_alloc->va)
5095 return -ENOMEM;
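/* The mailbox handed to hardware must be 16-byte aligned; 16 spare
 * bytes were allocated above, and PTR_ALIGN below rounds both the CPU
 * and DMA addresses up to the aligned view used for commands.
 */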
5097 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5098 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5099 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5101 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5102 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5103 &rx_filter->dma, GFP_KERNEL);
5104 if (!rx_filter->va) {
5105 status = -ENOMEM;
5106 goto free_mbox;
5107 }
5109 if (lancer_chip(adapter))
5110 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5111 else if (BE2_chip(adapter))
5112 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5113 else if (BE3_chip(adapter))
5114 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5115 else
5116 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5117 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5118 &stats_cmd->dma, GFP_KERNEL);
5119 if (!stats_cmd->va) {
5120 status = -ENOMEM;
5121 goto free_rx_filter;
5122 }
5124 mutex_init(&adapter->mbox_lock);
5125 spin_lock_init(&adapter->mcc_lock);
5126 spin_lock_init(&adapter->mcc_cq_lock);
5127 init_completion(&adapter->et_cmd_compl);
5129 pci_save_state(adapter->pdev);
5131 INIT_DELAYED_WORK(&adapter->work, be_worker);
5132 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5133 be_err_detection_task);
5135 adapter->rx_fc = true;
5136 adapter->tx_fc = true;
5138 /* Must be a power of 2 or else MODULO will BUG_ON */
5139 adapter->be_get_temp_freq = 64;
5141 return 0;
5143 free_rx_filter:
5144 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5145 free_mbox:
5146 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5147 mbox_mem_alloc->dma);
5148 return status;
5149 }
5151 static void be_remove(struct pci_dev *pdev)
5152 {
5153 struct be_adapter *adapter = pci_get_drvdata(pdev);
5155 if (!adapter)
5156 return;
5158 be_roce_dev_remove(adapter);
5159 be_intr_set(adapter, false);
5161 be_cancel_err_detection(adapter);
5163 unregister_netdev(adapter->netdev);
5165 be_clear(adapter);
5167 /* tell fw we're done with firing cmds */
5168 be_cmd_fw_clean(adapter);
5170 be_unmap_pci_bars(adapter);
5171 be_drv_cleanup(adapter);
5173 pci_disable_pcie_error_reporting(pdev);
5175 pci_release_regions(pdev);
5176 pci_disable_device(pdev);
5178 free_netdev(adapter->netdev);
5179 }
5181 static ssize_t be_hwmon_show_temp(struct device *dev,
5182 struct device_attribute *dev_attr,
5183 char *buf)
5184 {
5185 struct be_adapter *adapter = dev_get_drvdata(dev);
5187 /* Unit: millidegree Celsius */
5188 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5189 return -EIO;
5190 else
5191 return sprintf(buf, "%u\n",
5192 adapter->hwmon_info.be_on_die_temp * 1000);
5193 }
5195 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5196 be_hwmon_show_temp, NULL, 1);
5198 static struct attribute *be_hwmon_attrs[] = {
5199 &sensor_dev_attr_temp1_input.dev_attr.attr,
5200 NULL
5201 };
5203 ATTRIBUTE_GROUPS(be_hwmon);
5205 static char *mc_name(struct be_adapter *adapter)
5206 {
5207 char *str = ""; /* default */
5209 switch (adapter->mc_type) {
5210 case UMC:
5211 str = "UMC";
5212 break;
5213 case FLEX10:
5214 str = "FLEX10";
5215 break;
5216 case vNIC1:
5217 str = "vNIC-1";
5218 break;
5219 case nPAR:
5220 str = "nPAR";
5221 break;
5222 case UFP:
5223 str = "UFP";
5224 break;
5225 case vNIC2:
5226 str = "vNIC-2";
5227 break;
5228 default:
5229 str = "";
5230 }
5232 return str;
5233 }
5235 static inline char *func_name(struct be_adapter *adapter)
5236 {
5237 return be_physfn(adapter) ? "PF" : "VF";
5238 }
5240 static inline char *nic_name(struct pci_dev *pdev)
5241 {
5242 switch (pdev->device) {
5243 case OC_DEVICE_ID1:
5244 return OC_NAME;
5245 case OC_DEVICE_ID2:
5246 return OC_NAME_BE;
5247 case OC_DEVICE_ID3:
5248 case OC_DEVICE_ID4:
5249 return OC_NAME_LANCER;
5250 case BE_DEVICE_ID2:
5251 return BE3_NAME;
5252 case OC_DEVICE_ID5:
5253 case OC_DEVICE_ID6:
5254 return OC_NAME_SH;
5255 default:
5256 return BE_NAME;
5257 }
5258 }
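/* Probe sequence: enable the PCI function, claim regions, allocate the
 * netdev, set the DMA mask, map BARs, init driver state, run be_setup()
 * and only then register the netdev; the error labels at the end unwind
 * these steps in reverse order.
 */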
5260 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5261 {
5262 struct be_adapter *adapter;
5263 struct net_device *netdev;
5264 int status = 0;
5266 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5268 status = pci_enable_device(pdev);
5269 if (status)
5270 goto do_none;
5272 status = pci_request_regions(pdev, DRV_NAME);
5273 if (status)
5274 goto disable_dev;
5275 pci_set_master(pdev);
5277 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5278 if (!netdev) {
5279 status = -ENOMEM;
5280 goto rel_reg;
5281 }
5282 adapter = netdev_priv(netdev);
5283 adapter->pdev = pdev;
5284 pci_set_drvdata(pdev, adapter);
5285 adapter->netdev = netdev;
5286 SET_NETDEV_DEV(netdev, &pdev->dev);
5288 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5289 if (!status) {
5290 netdev->features |= NETIF_F_HIGHDMA;
5291 } else {
5292 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5293 if (status) {
5294 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5295 goto free_netdev;
5296 }
5297 }
5299 status = pci_enable_pcie_error_reporting(pdev);
5300 if (!status)
5301 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
5303 status = be_map_pci_bars(adapter);
5304 if (status)
5305 goto free_netdev;
5307 status = be_drv_init(adapter);
5308 if (status)
5309 goto unmap_bars;
5311 status = be_setup(adapter);
5312 if (status)
5313 goto drv_cleanup;
5315 be_netdev_init(netdev);
5316 status = register_netdev(netdev);
5317 if (status != 0)
5318 goto unsetup;
5320 be_roce_dev_add(adapter);
5322 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5324 /* On Die temperature not supported for VF. */
5325 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5326 adapter->hwmon_info.hwmon_dev =
5327 devm_hwmon_device_register_with_groups(&pdev->dev,
5328 DRV_NAME,
5329 adapter,
5330 be_hwmon_groups);
5331 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5332 }
5334 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5335 func_name(adapter), mc_name(adapter), adapter->port_name);
5337 return 0;
5339 unsetup:
5340 be_clear(adapter);
5341 drv_cleanup:
5342 be_drv_cleanup(adapter);
5343 unmap_bars:
5344 be_unmap_pci_bars(adapter);
5345 free_netdev:
5346 free_netdev(netdev);
5347 rel_reg:
5348 pci_release_regions(pdev);
5349 disable_dev:
5350 pci_disable_device(pdev);
5351 do_none:
5352 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5353 return status;
5354 }
5356 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5357 {
5358 struct be_adapter *adapter = pci_get_drvdata(pdev);
5360 if (adapter->wol_en)
5361 be_setup_wol(adapter, true);
5363 be_intr_set(adapter, false);
5364 be_cancel_err_detection(adapter);
5366 be_cleanup(adapter);
5368 pci_save_state(pdev);
5369 pci_disable_device(pdev);
5370 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5371 return 0;
5372 }
5374 static int be_pci_resume(struct pci_dev *pdev)
5375 {
5376 struct be_adapter *adapter = pci_get_drvdata(pdev);
5377 int status = 0;
5379 status = pci_enable_device(pdev);
5380 if (status)
5381 return status;
5383 pci_restore_state(pdev);
5385 status = be_resume(adapter);
5386 if (status)
5387 return status;
5389 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5391 if (adapter->wol_en)
5392 be_setup_wol(adapter, false);
5394 return 0;
5395 }
5397 /*
5398 * An FLR will stop BE from DMAing any data.
5399 */
5400 static void be_shutdown(struct pci_dev *pdev)
5401 {
5402 struct be_adapter *adapter = pci_get_drvdata(pdev);
5404 if (!adapter)
5405 return;
5407 be_roce_dev_shutdown(adapter);
5408 cancel_delayed_work_sync(&adapter->work);
5409 be_cancel_err_detection(adapter);
5411 netif_device_detach(adapter->netdev);
5413 be_cmd_reset_function(adapter);
5415 pci_disable_device(pdev);
5416 }
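/* EEH (PCI error) callbacks: error_detected marks the error and cleans
 * up, slot_reset re-enables the device and waits for firmware
 * readiness, and resume rebuilds the adapter via be_resume().
 */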
5418 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5419 pci_channel_state_t state)
5420 {
5421 struct be_adapter *adapter = pci_get_drvdata(pdev);
5423 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5425 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5426 be_set_error(adapter, BE_ERROR_EEH);
5428 be_cancel_err_detection(adapter);
5430 be_cleanup(adapter);
5431 }
5433 if (state == pci_channel_io_perm_failure)
5434 return PCI_ERS_RESULT_DISCONNECT;
5436 pci_disable_device(pdev);
5438 /* The error could cause the FW to trigger a flash debug dump.
5439 * Resetting the card while flash dump is in progress
5440 * can cause it not to recover; wait for it to finish.
5441 * Wait only for first function as it is needed only once per
5442 * adapter.
5443 */
5444 if (pdev->devfn == 0)
5445 ssleep(30);
5447 return PCI_ERS_RESULT_NEED_RESET;
5448 }
5450 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5451 {
5452 struct be_adapter *adapter = pci_get_drvdata(pdev);
5453 int status;
5455 dev_info(&adapter->pdev->dev, "EEH reset\n");
5457 status = pci_enable_device(pdev);
5458 if (status)
5459 return PCI_ERS_RESULT_DISCONNECT;
5461 pci_set_master(pdev);
5462 pci_restore_state(pdev);
5464 /* Check if card is ok and fw is ready */
5465 dev_info(&adapter->pdev->dev,
5466 "Waiting for FW to be ready after EEH reset\n");
5467 status = be_fw_wait_ready(adapter);
5468 if (status)
5469 return PCI_ERS_RESULT_DISCONNECT;
5471 pci_cleanup_aer_uncorrect_error_status(pdev);
5472 be_clear_error(adapter, BE_CLEAR_ALL);
5473 return PCI_ERS_RESULT_RECOVERED;
5474 }
5476 static void be_eeh_resume(struct pci_dev *pdev)
5477 {
5478 int status = 0;
5479 struct be_adapter *adapter = pci_get_drvdata(pdev);
5481 dev_info(&adapter->pdev->dev, "EEH resume\n");
5483 pci_save_state(pdev);
5485 status = be_resume(adapter);
5486 if (status)
5487 goto err;
5489 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5490 return;
5491 err:
5492 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5493 }
5495 static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5496 {
5497 struct be_adapter *adapter = pci_get_drvdata(pdev);
5498 u16 num_vf_qs;
5499 int status;
5501 if (!num_vfs)
5502 be_vf_clear(adapter);
5504 adapter->num_vfs = num_vfs;
5506 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5507 dev_warn(&pdev->dev,
5508 "Cannot disable VFs while they are assigned\n");
5512 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5513 * are equally distributed across the max-number of VFs. The user may
5514 * request only a subset of the max-vfs to be enabled.
5515 * Based on num_vfs, redistribute the resources across num_vfs so that
5516 * each VF will have access to more number of resources.
5517 * This facility is not available in BE3 FW.
5518 * Also, this is done by FW in Lancer chip.
5519 */
5520 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5521 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5522 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5523 adapter->num_vfs, num_vf_qs);
5524 if (status)
5525 dev_info(&pdev->dev,
5526 "Failed to optimize SR-IOV resources\n");
5527 }
5529 status = be_get_resources(adapter);
5530 if (status)
5531 return be_cmd_status(status);
5533 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5534 rtnl_lock();
5535 status = be_update_queues(adapter);
5536 rtnl_unlock();
5537 if (status)
5538 return be_cmd_status(status);
5540 if (adapter->num_vfs)
5541 status = be_vf_setup(adapter);
5543 if (!status)
5544 return adapter->num_vfs;
5546 return 0;
5547 }
5549 static const struct pci_error_handlers be_eeh_handlers = {
5550 .error_detected = be_eeh_err_detected,
5551 .slot_reset = be_eeh_reset,
5552 .resume = be_eeh_resume,
5553 };
5555 static struct pci_driver be_driver = {
5556 .name = DRV_NAME,
5557 .id_table = be_dev_ids,
5558 .probe = be_probe,
5559 .remove = be_remove,
5560 .suspend = be_suspend,
5561 .resume = be_pci_resume,
5562 .shutdown = be_shutdown,
5563 .sriov_configure = be_pci_sriov_configure,
5564 .err_handler = &be_eeh_handlers
5565 };
5567 static int __init be_init_module(void)
5568 {
5569 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5570 rx_frag_size != 2048) {
5571 printk(KERN_WARNING DRV_NAME
5572 " : Module param rx_frag_size must be 2048/4096/8192."
5574 rx_frag_size = 2048;
5578 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5579 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5582 return pci_register_driver(&be_driver);
5583 }
5584 module_init(be_init_module);
5586 static void __exit be_exit_module(void)
5587 {
5588 pci_unregister_driver(&be_driver);
5589 }
5590 module_exit(be_exit_module);