/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* per-bit block-name strings elided */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* per-bit block-name strings elided */
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

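/* Example encoding, derived from the field masks above: for qid 5 with
 * arm = true, clear_int = false and num_popped = 3, the single 32-bit
 * doorbell write packs (5 & DB_EQ_RING_ID_MASK), the re-arm bit, the
 * event bit and (3 << DB_EQ_NUM_POPPED_SHIFT) into one register value.
 */
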
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

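/* The flow above is: add the new MAC (which may legitimately fail on a VF
 * lacking the FILTMGMT privilege), delete the old pmac if a new pmac-id was
 * actually assigned, then read back the active MAC from FW. Only when the
 * readback matches the requested address is netdev->dev_addr updated.
 */
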
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

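/* Worked example: with *acc == 0x0001fff0 and val == 0x0005, lo(*acc) is
 * 0xfff0 > val, so the 16-bit HW counter must have wrapped; newacc =
 * 0x00010000 + 0x0005 + 65536 = 0x00020005. The high half of the
 * accumulator counts wrap-arounds while the low half mirrors the counter.
 */
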
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

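/* e.g. an skb with linear header data and two page frags needs
 * 1 (hdr) + 1 (linear) + 2 (frags) = 4 WRBs.
 */
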
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

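/* The first two helpers give the TX queue hysteresis: the subqueue is
 * stopped once fewer than BE_MAX_TX_FRAG_COUNT free slots remain (so a
 * maximally fragmented skb always fits), and it is only woken again after
 * completions have drained the ring below half of its length.
 */
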
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	u64 dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

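/* Ring layout produced above for one packet: [hdr WRB][linear-frag WRB]
 * [page-frag WRBs...]. The hdr slot is reserved first via be_tx_get_wrb_hdr()
 * but filled last, only once every fragment mapped successfully; on a DMA
 * mapping error be_xmit_restore() rewinds the producer index and unmaps
 * whatever was mapped, leaving the ring untouched.
 */
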
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

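/* On the non-Lancer ASICs the TX ULP appears to require WRBs to be posted
 * in even multiples, hence the zeroed pad WRB when pend_wrb_cnt is odd; the
 * header's num_wrb field is bumped to account for the pad. Lancer has no
 * such restriction.
 */
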
/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) && \
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

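/* The helpers above encode the OS2BMC mirroring policy: ARP, DHCP, NetBIOS
 * and IPv6 RA/NA/RAS classes are mirrored to the BMC when their filter bit
 * is set in adapter->bmc_filt_mask (reported by FW), while generic
 * multicast/broadcast traffic is mirrored only when its coarser filter bit
 * is clear.
 */
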
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

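/* In short: with at most be_max_vlans() VLANs the HW filter table is
 * programmed exactly; once the user exceeds it (or FW runs out of filter
 * resources) the interface falls back to VLAN-promiscuous mode, and promisc
 * is dropped again as soon as HW filtering succeeds.
 */
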
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}

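/* Worked example of the formula above: at 45000 combined rx+tx pkts/sec,
 * eqd = (45000 / 15000) << 2 = 12; any result below 8 is rounded down to 0
 * (no interrupt delay at low rates), and the final value is clamped to
 * [aic->min_eqd, aic->max_eqd].
 */
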
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (time_before_eq(now, aic->jiffies) ||
	    jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

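/* The FW command takes the delay as a multiplier encoding rather than raw
 * usecs, hence delay_multiplier = eqd * 65 / 100; only EQs whose delay
 * actually changed (or all of them, when force_update is set) are batched
 * into a single MODIFY_EQ_DELAY command.
 */
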
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

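/* Note the coalescing rule used in both RX fill paths: HW slices each
 * posted page into rx_frag_size pieces, so a completion fragment whose
 * page_offset is non-zero belongs to the same physical page as its
 * predecessor and is merged into the current skb frag slot (the extra page
 * reference is dropped with put_page()).
 */
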
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

2127 /* Process the RX completion indicated by rxcp when GRO is enabled */
2128 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2129 struct napi_struct *napi,
2130 struct be_rx_compl_info *rxcp)
2132 struct be_adapter *adapter = rxo->adapter;
2133 struct be_rx_page_info *page_info;
2134 struct sk_buff *skb = NULL;
2135 u16 remaining, curr_frag_len;
2138 skb = napi_get_frags(napi);
2140 be_rx_compl_discard(rxo, rxcp);
2144 remaining = rxcp->pkt_size;
2145 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2146 page_info = get_rx_page_info(rxo);
2148 curr_frag_len = min(remaining, rx_frag_size);
2150 /* Coalesce all frags from the same physical page in one slot */
2151 if (i == 0 || page_info->page_offset == 0) {
2152 /* First frag or Fresh page */
2153 j++;
2154 skb_frag_set_page(skb, j, page_info->page);
2155 skb_shinfo(skb)->frags[j].page_offset =
2156 page_info->page_offset;
2157 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2158 } else {
2159 put_page(page_info->page);
2160 }
2161 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2162 skb->truesize += rx_frag_size;
2163 remaining -= curr_frag_len;
2164 memset(page_info, 0, sizeof(*page_info));
2165 }
2166 BUG_ON(j > MAX_SKB_FRAGS);
2167
2168 skb_shinfo(skb)->nr_frags = j + 1;
2169 skb->len = rxcp->pkt_size;
2170 skb->data_len = rxcp->pkt_size;
2171 skb->ip_summed = CHECKSUM_UNNECESSARY;
2172 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2173 if (adapter->netdev->features & NETIF_F_RXHASH)
2174 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2176 skb->csum_level = rxcp->tunneled;
2177 skb_mark_napi_id(skb, napi);
2178
2179 if (rxcp->vlanf)
2180 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2181
2182 napi_gro_frags(napi);
2183 }
2185 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2186 struct be_rx_compl_info *rxcp)
2188 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2189 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2190 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2191 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2192 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2193 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2194 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2195 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2196 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2197 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2198 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2199 if (rxcp->vlanf) {
2200 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2201 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2202 }
2203 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2204 rxcp->tunneled =
2205 GET_RX_COMPL_V1_BITS(tunneled, compl);
2206 }
2208 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2209 struct be_rx_compl_info *rxcp)
2211 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2212 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2213 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2214 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2215 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2216 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2217 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2218 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2219 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2220 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2221 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2222 if (rxcp->vlanf) {
2223 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2224 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2225 }
2226 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2227 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2228 }
2230 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2232 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2233 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2234 struct be_adapter *adapter = rxo->adapter;
2236 /* For checking the valid bit it is Ok to use either definition as the
2237 * valid bit is at the same position in both v0 and v1 Rx compl */
2238 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2239 return NULL;
2240
2241 rmb();
2242 be_dws_le_to_cpu(compl, sizeof(*compl));
2243
2244 if (adapter->be3_native)
2245 be_parse_rx_compl_v1(compl, rxcp);
2246 else
2247 be_parse_rx_compl_v0(compl, rxcp);
2248
2249 if (rxcp->ip_frag)
2250 rxcp->l4_csum = 0;
2251
2252 if (rxcp->vlanf) {
2253 /* In QNQ modes, if qnq bit is not set, then the packet was
2254 * tagged only with the transparent outer vlan-tag and must
2255 * not be treated as a vlan packet by host
2256 */
2257 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2258 rxcp->vlanf = 0;
2259
2260 if (!lancer_chip(adapter))
2261 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2262
2263 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2264 !test_bit(rxcp->vlan_tag, adapter->vids))
2265 rxcp->vlanf = 0;
2266 }
2267
2268 /* As the compl has been parsed, reset it; we won't touch it again */
2269 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2270
2271 queue_tail_inc(&rxo->cq);
2272 return rxcp;
2273 }
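/* Usage sketch (editor's illustration, not compiled): be_rx_compl_get()
 * returns NULL once the CQ is drained, so consumers poll it in a loop and
 * ack the entries they ate, in the style of be_rx_cq_clean()/be_process_rx():
 */
#if 0	/* example only */
	while ((rxcp = be_rx_compl_get(rxo))) {
		be_rx_compl_process(rxo, napi, rxcp);	/* or _gro variant */
		be_cq_notify(adapter, rxo->cq.id, false, 1);
	}
#endif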
2275 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2277 u32 order = get_order(size);
2278
2279 if (order > 0)
2280 gfp |= __GFP_COMP;
2281 return alloc_pages(gfp, order);
2282 }
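/* Sizing example (editor's note, assuming 4K pages): with the default
 * rx_frag_size of 2048, get_order(2048) is 0, so big_page_size is 4096 and
 * each order-0 page is split into two 2048-byte frags. An rx_frag_size of
 * 8192 would mean an order-1 (8K) compound allocation, which is why
 * __GFP_COMP is set above for order > 0.
 */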
2285 * Allocate a page, split it to fragments of size rx_frag_size and post as
2286 * receive buffers to BE
2288 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2290 struct be_adapter *adapter = rxo->adapter;
2291 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2292 struct be_queue_info *rxq = &rxo->q;
2293 struct page *pagep = NULL;
2294 struct device *dev = &adapter->pdev->dev;
2295 struct be_eth_rx_d *rxd;
2296 u64 page_dmaaddr = 0, frag_dmaaddr;
2297 u32 posted, page_offset = 0, notify = 0;
2299 page_info = &rxo->page_info_tbl[rxq->head];
2300 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2301 if (!pagep) {
2302 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2303 if (unlikely(!pagep)) {
2304 rx_stats(rxo)->rx_post_fail++;
2305 break;
2306 }
2307 page_dmaaddr = dma_map_page(dev, pagep, 0,
2308 adapter->big_page_size,
2309 DMA_FROM_DEVICE);
2310 if (dma_mapping_error(dev, page_dmaaddr)) {
2311 put_page(pagep);
2312 pagep = NULL;
2313 adapter->drv_stats.dma_map_errors++;
2314 break;
2315 }
2316 page_offset = 0;
2317 } else {
2318 get_page(pagep);
2319 page_offset += rx_frag_size;
2320 }
2321 page_info->page_offset = page_offset;
2322 page_info->page = pagep;
2324 rxd = queue_head_node(rxq);
2325 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2326 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2327 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2329 /* Any space left in the current big page for another frag? */
2330 if ((page_offset + rx_frag_size + rx_frag_size) >
2331 adapter->big_page_size) {
2332 pagep = NULL;
2333 page_info->last_frag = true;
2334 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2335 } else {
2336 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2337 }
2339 prev_page_info = page_info;
2340 queue_head_inc(rxq);
2341 page_info = &rxo->page_info_tbl[rxq->head];
2342 }
2343
2344 /* Mark the last frag of a page when we break out of the above loop
2345 * with no more slots available in the RXQ
2346 */
2347 if (pagep) {
2348 prev_page_info->last_frag = true;
2349 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2350 }
2351
2352 if (posted) {
2353 atomic_add(posted, &rxq->used);
2354 if (rxo->rx_post_starved)
2355 rxo->rx_post_starved = false;
2356 do {
2357 notify = min(MAX_NUM_POST_ERX_DB, posted);
2358 be_rxq_notify(adapter, rxq->id, notify);
2359 posted -= notify;
2360 } while (posted);
2361 } else if (atomic_read(&rxq->used) == 0) {
2362 /* Let be_worker replenish when memory is available */
2363 rxo->rx_post_starved = true;
2364 }
2365 }
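/* Editor's note: the RQ doorbell encodes the posted count in a fixed-width
 * field, so the do/while above rings it in chunks of at most
 * MAX_NUM_POST_ERX_DB frags; e.g. posting 300 frags with a 255 limit takes
 * two doorbell writes (255 + 45).
 */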
2367 static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
2369 struct be_queue_info *tx_cq = &txo->cq;
2370 struct be_tx_compl_info *txcp = &txo->txcp;
2371 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2373 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2374 return NULL;
2375
2376 /* Ensure load ordering of valid bit dword and other dwords below */
2377 rmb();
2378 be_dws_le_to_cpu(compl, sizeof(*compl));
2380 txcp->status = GET_TX_COMPL_BITS(status, compl);
2381 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2383 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2384 queue_tail_inc(tx_cq);
2385
2386 return txcp;
2387 }
2388 static u16 be_tx_compl_process(struct be_adapter *adapter,
2389 struct be_tx_obj *txo, u16 last_index)
2391 struct sk_buff **sent_skbs = txo->sent_skb_list;
2392 struct be_queue_info *txq = &txo->q;
2393 u16 frag_index, num_wrbs = 0;
2394 struct sk_buff *skb = NULL;
2395 bool unmap_skb_hdr = false;
2396 struct be_eth_wrb *wrb;
2397
2398 do {
2399 if (sent_skbs[txq->tail]) {
2400 /* Free skb from prev req */
2401 if (skb)
2402 dev_consume_skb_any(skb);
2403 skb = sent_skbs[txq->tail];
2404 sent_skbs[txq->tail] = NULL;
2405 queue_tail_inc(txq); /* skip hdr wrb */
2406 num_wrbs++;
2407 unmap_skb_hdr = true;
2408 }
2409 wrb = queue_tail_node(txq);
2410 frag_index = txq->tail;
2411 unmap_tx_frag(&adapter->pdev->dev, wrb,
2412 (unmap_skb_hdr && skb_headlen(skb)));
2413 unmap_skb_hdr = false;
2414 queue_tail_inc(txq);
2415 num_wrbs++;
2416 } while (frag_index != last_index);
2417 dev_consume_skb_any(skb);
2418
2419 return num_wrbs;
2420 }
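/* Editor's sketch of the TXQ layout consumed above: each sent skb occupies
 * one header wrb followed by one wrb per DMA fragment, and sent_skb_list[]
 * holds the skb only at the header-wrb index, which is how the loop tells
 * header wrbs from fragment wrbs while walking tail -> last_index:
 *
 *   txq:       [hdr][frag][frag][hdr][frag] ...
 *   sent_skbs: [skb][NULL][NULL][skb][NULL] ...
 */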
2422 /* Return the number of events in the event queue */
2423 static inline int events_get(struct be_eq_obj *eqo)
2424 {
2425 struct be_eq_entry *eqe;
2426 int num = 0;
2427
2428 do {
2429 eqe = queue_tail_node(&eqo->q);
2430 if (eqe->evt == 0)
2431 break;
2432
2433 rmb();
2434 eqe->evt = 0;
2435 num++;
2436 queue_tail_inc(&eqo->q);
2437 } while (true);
2438
2439 return num;
2440 }
2442 /* Leaves the EQ in disarmed state */
2443 static void be_eq_clean(struct be_eq_obj *eqo)
2444 {
2445 int num = events_get(eqo);
2446
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448 }
2450 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q;
2454 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter;
2457 int flush_wait = 0;
2458
2459 /* Consume pending rx completions.
2460 * Wait for the flush completion (identified by zero num_rcvd)
2461 * to arrive. Notify CQ even when there are no more CQ entries
2462 * for HW to flush partially coalesced CQ entries.
2463 * In Lancer, there is no need to wait for flush compl.
2464 */
2465 for (;;) {
2466 rxcp = be_rx_compl_get(rxo);
2467 if (!rxcp) {
2468 if (lancer_chip(adapter))
2469 break;
2470
2471 if (flush_wait++ > 50 ||
2472 be_check_error(adapter,
2473 BE_ERROR_HW)) {
2474 dev_warn(&adapter->pdev->dev,
2475 "did not receive flush compl\n");
2476 break;
2477 }
2478 be_cq_notify(adapter, rx_cq->id, true, 0);
2479 mdelay(1);
2480 } else {
2481 be_rx_compl_discard(rxo, rxcp);
2482 be_cq_notify(adapter, rx_cq->id, false, 1);
2483 if (rxcp->num_rcvd == 0)
2484 break;
2485 }
2486 }
2487
2488 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0);
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500 }
2501
2502 static void be_tx_compl_clean(struct be_adapter *adapter)
2504 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2505 struct device *dev = &adapter->pdev->dev;
2506 struct be_tx_compl_info *txcp;
2507 struct be_queue_info *txq;
2508 struct be_tx_obj *txo;
2509 int i, pending_txqs;
2511 /* Stop polling for compls when HW has been silent for 10ms */
2512 do {
2513 pending_txqs = adapter->num_tx_qs;
2514
2515 for_all_tx_queues(adapter, txo, i) {
2516 cmpl = 0;
2517 num_wrbs = 0;
2518 txq = &txo->q;
2519 while ((txcp = be_tx_compl_get(txo))) {
2520 num_wrbs +=
2521 be_tx_compl_process(adapter, txo,
2522 txcp->end_index);
2523 cmpl++;
2524 }
2525 if (cmpl) {
2526 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2527 atomic_sub(num_wrbs, &txq->used);
2528 timeo = 0;
2529 }
2530 if (!be_is_tx_compl_pending(txo))
2531 pending_txqs--;
2532 }
2533
2534 if (pending_txqs == 0 || ++timeo > 10 ||
2535 be_check_error(adapter, BE_ERROR_HW))
2536 break;
2537
2538 mdelay(1);
2539 } while (true);
2540
2541 /* Free enqueued TX that was never notified to HW */
2542 for_all_tx_queues(adapter, txo, i) {
2543 txq = &txo->q;
2544
2545 if (atomic_read(&txq->used)) {
2546 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2547 i, atomic_read(&txq->used));
2548 notified_idx = txq->tail;
2549 end_idx = txq->tail;
2550 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2551 txq->len);
2552 /* Use the tx-compl process logic to handle requests
2553 * that were not sent to the HW.
2554 */
2555 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2556 atomic_sub(num_wrbs, &txq->used);
2557 BUG_ON(atomic_read(&txq->used));
2558 txo->pend_wrb_cnt = 0;
2559 /* Since hw was never notified of these requests,
2560 * reset TXQ indices
2561 */
2562 txq->head = notified_idx;
2563 txq->tail = notified_idx;
2564 }
2565 }
2566 }
2568 static void be_evt_queues_destroy(struct be_adapter *adapter)
2570 struct be_eq_obj *eqo;
2573 for_all_evt_queues(adapter, eqo, i) {
2574 if (eqo->q.created) {
2575 be_eq_clean(eqo);
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi);
2579 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q);
2585 static int be_evt_queues_create(struct be_adapter *adapter)
2587 struct be_queue_info *eq;
2588 struct be_eq_obj *eqo;
2589 struct be_aic_obj *aic;
2592 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2593 adapter->cfg_num_qs);
2595 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter;
2606 eqo->idx = i;
2607 aic->max_eqd = BE_MAX_EQD;
2608 aic->enable = true;
2609
2610 eq = &eqo->q;
2611 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2612 sizeof(struct be_eq_entry));
2613 if (rc)
2614 return rc;
2615
2616 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc)
2618 return rc;
2619 }
2620 return 0;
2621 }
2623 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2625 struct be_queue_info *q;
2627 q = &adapter->mcc_obj.q;
2628 if (q->created)
2629 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2630 be_queue_free(adapter, q);
2631
2632 q = &adapter->mcc_obj.cq;
2633 if (q->created)
2634 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2635 be_queue_free(adapter, q);
2636 }
2638 /* Must be called only after TX qs are created as MCC shares TX EQ */
2639 static int be_mcc_queues_create(struct be_adapter *adapter)
2641 struct be_queue_info *q, *cq;
2643 cq = &adapter->mcc_obj.cq;
2644 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2645 sizeof(struct be_mcc_compl)))
2646 goto err;
2647
2648 /* Use the default EQ for MCC completions */
2649 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2650 goto mcc_cq_free;
2651
2652 q = &adapter->mcc_obj.q;
2653 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2654 goto mcc_cq_destroy;
2656 if (be_cmd_mccq_create(adapter, q, cq))
2657 goto mcc_q_free;
2658
2659 return 0;
2660
2661 mcc_q_free:
2662 be_queue_free(adapter, q);
2663 mcc_cq_destroy:
2664 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2665 mcc_cq_free:
2666 be_queue_free(adapter, cq);
2667 err:
2668 return -1;
2669 }
2671 static void be_tx_queues_destroy(struct be_adapter *adapter)
2673 struct be_queue_info *q;
2674 struct be_tx_obj *txo;
2677 for_all_tx_queues(adapter, txo, i) {
2678 q = &txo->q;
2679 if (q->created)
2680 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2681 be_queue_free(adapter, q);
2682
2683 q = &txo->cq;
2684 if (q->created)
2685 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2686 be_queue_free(adapter, q);
2687 }
2688 }
2690 static int be_tx_qs_create(struct be_adapter *adapter)
2692 struct be_queue_info *cq;
2693 struct be_tx_obj *txo;
2694 struct be_eq_obj *eqo;
2697 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2699 for_all_tx_queues(adapter, txo, i) {
2700 cq = &txo->cq;
2701 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2702 sizeof(struct be_eth_tx_compl));
2703 if (status)
2704 return status;
2706 u64_stats_init(&txo->stats.sync);
2707 u64_stats_init(&txo->stats.sync_compl);
2709 /* If num_evt_qs is less than num_tx_qs, then more than
2710 * one txq share an eq
2711 */
2712 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2713 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2714 if (status)
2715 return status;
2716
2717 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2718 sizeof(struct be_eth_wrb));
2719 if (status)
2720 return status;
2721
2722 status = be_cmd_txq_create(adapter, txo);
2723 if (status)
2724 return status;
2725
2726 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2727 eqo->idx);
2728 }
2729
2730 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2731 adapter->num_tx_qs);
2732 return 0;
2733 }
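/* Mapping example (editor's note, counts assumed): with num_evt_qs = 2 and
 * num_tx_qs = 4, the i % num_evt_qs rule above places txq0/txq2 on EQ0 and
 * txq1/txq3 on EQ1, and netif_set_xps_queue() steers each TXQ's transmits
 * toward the CPUs in its EQ's affinity mask.
 */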
2735 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2737 struct be_queue_info *q;
2738 struct be_rx_obj *rxo;
2741 for_all_rx_queues(adapter, rxo, i) {
2742 q = &rxo->cq;
2743 if (q->created)
2744 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2745 be_queue_free(adapter, q);
2746 }
2747 }
2749 static int be_rx_cqs_create(struct be_adapter *adapter)
2751 struct be_queue_info *eq, *cq;
2752 struct be_rx_obj *rxo;
2755 /* We can create as many RSS rings as there are EQs. */
2756 adapter->num_rss_qs = adapter->num_evt_qs;
2758 /* We'll use RSS only if at least 2 RSS rings are supported. */
2759 if (adapter->num_rss_qs <= 1)
2760 adapter->num_rss_qs = 0;
2762 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2764 /* When the interface is not capable of RSS rings (and there is no
2765 * need to create a default RXQ) we'll still need one RXQ
2767 if (adapter->num_rx_qs == 0)
2768 adapter->num_rx_qs = 1;
2770 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2771 for_all_rx_queues(adapter, rxo, i) {
2772 rxo->adapter = adapter;
2773 cq = &rxo->cq;
2774 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2775 sizeof(struct be_eth_rx_compl));
2776 if (rc)
2777 return rc;
2778
2779 u64_stats_init(&rxo->stats.sync);
2780 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2781 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2782 if (rc)
2783 return rc;
2784 }
2785
2786 dev_info(&adapter->pdev->dev,
2787 "created %d RX queue(s)\n", adapter->num_rx_qs);
2788 return 0;
2789 }
2791 static irqreturn_t be_intx(int irq, void *dev)
2793 struct be_eq_obj *eqo = dev;
2794 struct be_adapter *adapter = eqo->adapter;
2795 int num_evts = 0;
2796
2797 /* IRQ is not expected when NAPI is scheduled as the EQ
2798 * will not be armed.
2799 * But, this can happen on Lancer INTx where it takes
2800 * a while to de-assert INTx or in BE2 where occasionally
2801 * an interrupt may be raised even when EQ is unarmed.
2802 * If NAPI is already scheduled, then counting & notifying
2803 * events will orphan them.
2805 if (napi_schedule_prep(&eqo->napi)) {
2806 num_evts = events_get(eqo);
2807 __napi_schedule(&eqo->napi);
2808 if (num_evts)
2809 eqo->spurious_intr = 0;
2810 }
2811 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
2813 /* Return IRQ_HANDLED only for the first spurious intr
2814 * after a valid intr to stop the kernel from branding
2815 * this irq as a bad one!
2817 if (num_evts || eqo->spurious_intr++ == 0)
2818 return IRQ_HANDLED;
2819 else
2820 return IRQ_NONE;
2821 }
2823 static irqreturn_t be_msix(int irq, void *dev)
2825 struct be_eq_obj *eqo = dev;
2827 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
2828 napi_schedule(&eqo->napi);
2829 return IRQ_HANDLED;
2830 }
2832 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2834 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2837 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2838 int budget, int polling)
2840 struct be_adapter *adapter = rxo->adapter;
2841 struct be_queue_info *rx_cq = &rxo->cq;
2842 struct be_rx_compl_info *rxcp;
2843 u32 work_done;
2844 u32 frags_consumed = 0;
2845
2846 for (work_done = 0; work_done < budget; work_done++) {
2847 rxcp = be_rx_compl_get(rxo);
2848 if (!rxcp)
2849 break;
2850
2851 /* Is it a flush compl that has no data */
2852 if (unlikely(rxcp->num_rcvd == 0))
2853 goto loop_continue;
2855 /* Discard compl with partial DMA Lancer B0 */
2856 if (unlikely(!rxcp->pkt_size)) {
2857 be_rx_compl_discard(rxo, rxcp);
2858 goto loop_continue;
2859 }
2860
2861 /* On BE drop pkts that arrive due to imperfect filtering in
2862 * promiscuous mode on some SKUs
2863 */
2864 if (unlikely(rxcp->port != adapter->port_num &&
2865 !lancer_chip(adapter))) {
2866 be_rx_compl_discard(rxo, rxcp);
2867 goto loop_continue;
2868 }
2869
2870 /* Don't do gro when we're busy_polling */
2871 if (do_gro(rxcp) && polling != BUSY_POLLING)
2872 be_rx_compl_process_gro(rxo, napi, rxcp);
2873 else
2874 be_rx_compl_process(rxo, napi, rxcp);
2875
2876 loop_continue:
2877 frags_consumed += rxcp->num_rcvd;
2878 be_rx_stats_update(rxo, rxcp);
2879 }
2880
2881 if (work_done) {
2882 be_cq_notify(adapter, rx_cq->id, true, work_done);
2884 /* When an rx-obj gets into post_starved state, just
2885 * let be_worker do the posting.
2887 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2888 !rxo->rx_post_starved)
2889 be_post_rx_frags(rxo, GFP_ATOMIC,
2890 max_t(u32, MAX_RX_POST,
2891 frags_consumed));
2892 }
2893
2894 return work_done;
2895 }
2897 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2898 {
2899 switch (status) {
2900 case BE_TX_COMP_HDR_PARSE_ERR:
2901 tx_stats(txo)->tx_hdr_parse_err++;
2902 break;
2903 case BE_TX_COMP_NDMA_ERR:
2904 tx_stats(txo)->tx_dma_err++;
2905 break;
2906 case BE_TX_COMP_ACL_ERR:
2907 tx_stats(txo)->tx_spoof_check_err++;
2908 break;
2909 }
2910 }
2912 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2913 {
2914 switch (status) {
2915 case LANCER_TX_COMP_LSO_ERR:
2916 tx_stats(txo)->tx_tso_err++;
2917 break;
2918 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2919 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2920 tx_stats(txo)->tx_spoof_check_err++;
2921 break;
2922 case LANCER_TX_COMP_QINQ_ERR:
2923 tx_stats(txo)->tx_qinq_err++;
2924 break;
2925 case LANCER_TX_COMP_PARITY_ERR:
2926 tx_stats(txo)->tx_internal_parity_err++;
2927 break;
2928 case LANCER_TX_COMP_DMA_ERR:
2929 tx_stats(txo)->tx_dma_err++;
2930 break;
2931 }
2932 }
2934 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2935 int idx)
2936 {
2937 int num_wrbs = 0, work_done = 0;
2938 struct be_tx_compl_info *txcp;
2940 while ((txcp = be_tx_compl_get(txo))) {
2941 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
2942 work_done++;
2943
2944 if (txcp->status) {
2945 if (lancer_chip(adapter))
2946 lancer_update_tx_err(txo, txcp->status);
2947 else
2948 be_update_tx_err(txo, txcp->status);
2949 }
2950 }
2951
2952 if (work_done) {
2953 be_cq_notify(adapter, txo->cq.id, true, work_done);
2954 atomic_sub(num_wrbs, &txo->q.used);
2956 /* As Tx wrbs have been freed up, wake up netdev queue
2957 * if it was stopped due to lack of tx wrbs. */
2958 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2959 be_can_txq_wake(txo)) {
2960 netif_wake_subqueue(adapter->netdev, idx);
2961 }
2962
2963 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2964 tx_stats(txo)->tx_compl += work_done;
2965 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2966 }
2967 }
2969 #ifdef CONFIG_NET_RX_BUSY_POLL
2970 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2971 {
2972 bool status = true;
2973
2974 spin_lock(&eqo->lock); /* BH is already disabled */
2975 if (eqo->state & BE_EQ_LOCKED) {
2976 WARN_ON(eqo->state & BE_EQ_NAPI);
2977 eqo->state |= BE_EQ_NAPI_YIELD;
2978 status = false;
2979 } else {
2980 eqo->state = BE_EQ_NAPI;
2981 }
2982 spin_unlock(&eqo->lock);
2983 return status;
2984 }
2986 static inline void be_unlock_napi(struct be_eq_obj *eqo)
2988 spin_lock(&eqo->lock); /* BH is already disabled */
2990 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2991 eqo->state = BE_EQ_IDLE;
2993 spin_unlock(&eqo->lock);
2996 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2997 {
2998 bool status = true;
2999
3000 spin_lock_bh(&eqo->lock);
3001 if (eqo->state & BE_EQ_LOCKED) {
3002 eqo->state |= BE_EQ_POLL_YIELD;
3003 status = false;
3004 } else {
3005 eqo->state |= BE_EQ_POLL;
3006 }
3007 spin_unlock_bh(&eqo->lock);
3008 return status;
3009 }
3011 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3013 spin_lock_bh(&eqo->lock);
3015 WARN_ON(eqo->state & (BE_EQ_NAPI));
3016 eqo->state = BE_EQ_IDLE;
3018 spin_unlock_bh(&eqo->lock);
3021 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3023 spin_lock_init(&eqo->lock);
3024 eqo->state = BE_EQ_IDLE;
3027 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3028 {
3029 local_bh_disable();
3030
3031 /* It's enough to just acquire napi lock on the eqo to stop
3032 * be_busy_poll() from processing any queues.
3033 */
3034 while (!be_lock_napi(eqo))
3035 mdelay(1);
3036
3037 local_bh_enable();
3038 }
3040 #else /* CONFIG_NET_RX_BUSY_POLL */
3042 static inline bool be_lock_napi(struct be_eq_obj *eqo)
3043 {
3044 return true;
3045 }
3046
3047 static inline void be_unlock_napi(struct be_eq_obj *eqo)
3048 {
3049 }
3050
3051 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3052 {
3053 return false;
3054 }
3055
3056 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3057 {
3058 }
3059
3060 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3061 {
3062 }
3063
3064 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3065 {
3066 }
3067 #endif /* CONFIG_NET_RX_BUSY_POLL */
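/* Editor's summary of the lock protocol above: eqo->state moves between
 * BE_EQ_IDLE, BE_EQ_NAPI and BE_EQ_POLL; a contender that loses the race
 * records a *_YIELD flag instead of spinning. NAPI eventually wins because
 * be_disable_busy_poll() holds the napi lock until busy-poll backs off:
 *
 *   IDLE --be_lock_napi()-------> NAPI --be_unlock_napi()-------> IDLE
 *   IDLE --be_lock_busy_poll()--> POLL --be_unlock_busy_poll()--> IDLE
 */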
3069 int be_poll(struct napi_struct *napi, int budget)
3071 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3072 struct be_adapter *adapter = eqo->adapter;
3073 int max_work = 0, work, i, num_evts;
3074 struct be_rx_obj *rxo;
3075 struct be_tx_obj *txo;
3076 u32 mult_enc = 0;
3077
3078 num_evts = events_get(eqo);
3080 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3081 be_process_tx(adapter, txo, i);
3083 if (be_lock_napi(eqo)) {
3084 /* This loop will iterate twice for EQ0 in which
3085 * completions of the last RXQ (default one) are also processed
3086 * For other EQs the loop iterates only once
3088 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3089 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3090 max_work = max(work, max_work);
3091 }
3092 be_unlock_napi(eqo);
3093 } else {
3094 max_work = budget;
3095 }
3096
3097 if (is_mcc_eqo(eqo))
3098 be_process_mcc(adapter);
3100 if (max_work < budget) {
3101 napi_complete(napi);
3103 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3104 * delay via a delay multiplier encoding value
3106 if (skyhawk_chip(adapter))
3107 mult_enc = be_get_eq_delay_mult_enc(eqo);
3109 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3110 mult_enc);
3111 } else {
3112 /* As we'll continue in polling mode, count and clear events */
3113 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3114 }
3115 return max_work;
3116 }
3118 #ifdef CONFIG_NET_RX_BUSY_POLL
3119 static int be_busy_poll(struct napi_struct *napi)
3121 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3122 struct be_adapter *adapter = eqo->adapter;
3123 struct be_rx_obj *rxo;
3124 int i, work = 0;
3125
3126 if (!be_lock_busy_poll(eqo))
3127 return LL_FLUSH_BUSY;
3128
3129 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3130 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3131 if (work)
3132 break;
3133 }
3134
3135 be_unlock_busy_poll(eqo);
3136 return work;
3137 }
3138 #endif /* CONFIG_NET_RX_BUSY_POLL */
3140 void be_detect_error(struct be_adapter *adapter)
3142 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3143 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3144 u32 i;
3145 struct device *dev = &adapter->pdev->dev;
3147 if (be_check_error(adapter, BE_ERROR_HW))
3148 return;
3149
3150 if (lancer_chip(adapter)) {
3151 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3152 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3153 be_set_error(adapter, BE_ERROR_UE);
3154 sliport_err1 = ioread32(adapter->db +
3155 SLIPORT_ERROR1_OFFSET);
3156 sliport_err2 = ioread32(adapter->db +
3157 SLIPORT_ERROR2_OFFSET);
3158 /* Do not log error messages if its a FW reset */
3159 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3160 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3161 dev_info(dev, "Firmware update in progress\n");
3162 } else {
3163 dev_err(dev, "Error detected in the card\n");
3164 dev_err(dev, "ERR: sliport status 0x%x\n",
3165 sliport_status);
3166 dev_err(dev, "ERR: sliport error1 0x%x\n",
3167 sliport_err1);
3168 dev_err(dev, "ERR: sliport error2 0x%x\n",
3169 sliport_err2);
3170 }
3171 }
3172 } else {
3173 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3174 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3175 ue_lo_mask = ioread32(adapter->pcicfg +
3176 PCICFG_UE_STATUS_LOW_MASK);
3177 ue_hi_mask = ioread32(adapter->pcicfg +
3178 PCICFG_UE_STATUS_HI_MASK);
3180 ue_lo = (ue_lo & ~ue_lo_mask);
3181 ue_hi = (ue_hi & ~ue_hi_mask);
3183 /* On certain platforms BE hardware can indicate spurious UEs.
3184 * Allow HW to stop working completely in case of a real UE.
3185 * Hence not setting the hw_error for UE detection.
3186 */
3187
3188 if (ue_lo || ue_hi) {
3189 dev_err(dev,
3190 "Unrecoverable Error detected in the adapter");
3191 dev_err(dev, "Please reboot server to recover");
3192 if (skyhawk_chip(adapter))
3193 be_set_error(adapter, BE_ERROR_UE);
3195 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3196 if (ue_lo & 1)
3197 dev_err(dev, "UE: %s bit set\n",
3198 ue_status_low_desc[i]);
3199 }
3200 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3201 if (ue_hi & 1)
3202 dev_err(dev, "UE: %s bit set\n",
3203 ue_status_hi_desc[i]);
3204 }
3205 }
3206 }
3207 }
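/* Decode example (editor's note): the shift-and-test loops above walk the
 * unmasked UE bits LSB first, so ue_lo = 0x5 reports the strings at
 * ue_status_low_desc[0] and ue_status_low_desc[2].
 */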
3209 static void be_msix_disable(struct be_adapter *adapter)
3211 if (msix_enabled(adapter)) {
3212 pci_disable_msix(adapter->pdev);
3213 adapter->num_msix_vec = 0;
3214 adapter->num_msix_roce_vec = 0;
3218 static int be_msix_enable(struct be_adapter *adapter)
3221 struct device *dev = &adapter->pdev->dev;
3223 /* If RoCE is supported, program the max number of NIC vectors that
3224 * may be configured via set-channels, along with vectors needed for
3225 * RoCE. Else, just program the number we'll use initially.
3226 */
3227 if (be_roce_supported(adapter))
3228 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3229 2 * num_online_cpus());
3231 num_vec = adapter->cfg_num_qs;
3233 for (i = 0; i < num_vec; i++)
3234 adapter->msix_entries[i].entry = i;
3236 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3237 MIN_MSIX_VECTORS, num_vec);
3238 if (num_vec < 0)
3239 goto fail;
3240
3241 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3242 adapter->num_msix_roce_vec = num_vec / 2;
3243 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3244 adapter->num_msix_roce_vec);
3245 }
3246
3247 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3248
3249 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3250 adapter->num_msix_vec);
3251 return 0;
3252
3253 fail:
3254 dev_warn(dev, "MSIx enable failed\n");
3255
3256 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3257 if (be_virtfn(adapter))
3258 return num_vec;
3259 return 0;
3260 }
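/* Vector-count example (editor's note, values illustrative): on a PF with
 * RoCE support, be_max_eqs() = 8 and 8 online CPUs, the code above requests
 * min(2 * 8, 2 * 8) = 16 vectors and, if all are granted, splits them
 * evenly: num_msix_roce_vec = 8 and num_msix_vec = 8.
 */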
3262 static inline int be_msix_vec_get(struct be_adapter *adapter,
3263 struct be_eq_obj *eqo)
3265 return adapter->msix_entries[eqo->msix_idx].vector;
3268 static int be_msix_register(struct be_adapter *adapter)
3270 struct net_device *netdev = adapter->netdev;
3271 struct be_eq_obj *eqo;
3274 for_all_evt_queues(adapter, eqo, i) {
3275 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3276 vec = be_msix_vec_get(adapter, eqo);
3277 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3278 if (status)
3279 goto err_msix;
3280
3281 irq_set_affinity_hint(vec, eqo->affinity_mask);
3282 }
3283
3284 return 0;
3285 err_msix:
3286 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3287 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3288 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3289 status);
3290 be_msix_disable(adapter);
3291 return status;
3292 }
3294 static int be_irq_register(struct be_adapter *adapter)
3296 struct net_device *netdev = adapter->netdev;
3299 if (msix_enabled(adapter)) {
3300 status = be_msix_register(adapter);
3301 if (status == 0)
3302 goto done;
3303 /* INTx is not supported for VF */
3304 if (be_virtfn(adapter))
3305 return status;
3306 }
3307
3308 /* INTx: only the first EQ is used */
3309 netdev->irq = adapter->pdev->irq;
3310 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3311 &adapter->eq_obj[0]);
3312 if (status) {
3313 dev_err(&adapter->pdev->dev,
3314 "INTx request IRQ failed - err %d\n", status);
3315 return status;
3316 }
3317 done:
3318 adapter->isr_registered = true;
3319 return 0;
3320 }
3322 static void be_irq_unregister(struct be_adapter *adapter)
3324 struct net_device *netdev = adapter->netdev;
3325 struct be_eq_obj *eqo;
3328 if (!adapter->isr_registered)
3329 return;
3330
3331 /* INTx */
3332 if (!msix_enabled(adapter)) {
3333 free_irq(netdev->irq, &adapter->eq_obj[0]);
3334 goto done;
3335 }
3336
3337 /* MSIx */
3338 for_all_evt_queues(adapter, eqo, i) {
3339 vec = be_msix_vec_get(adapter, eqo);
3340 irq_set_affinity_hint(vec, NULL);
3341 free_irq(vec, eqo);
3342 }
3343
3344 done:
3345 adapter->isr_registered = false;
3346 }
3348 static void be_rx_qs_destroy(struct be_adapter *adapter)
3350 struct be_queue_info *q;
3351 struct be_rx_obj *rxo;
3354 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q;
3356 if (q->created) {
3357 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo);
3359 }
3360 be_queue_free(adapter, q);
3361 }
3362 }
3364 static int be_close(struct net_device *netdev)
3366 struct be_adapter *adapter = netdev_priv(netdev);
3367 struct be_eq_obj *eqo;
3370 /* This protection is needed as be_close() may be called even when the
3371 * adapter is in cleared state (after eeh perm failure)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0;
3375
3376 be_roce_dev_close(adapter);
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3379 for_all_evt_queues(adapter, eqo, i) {
3380 napi_disable(&eqo->napi);
3381 be_disable_busy_poll(eqo);
3382 }
3383 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3384 }
3385
3386 be_async_mcc_disable(adapter);
3388 /* Wait for all pending tx completions to arrive so that
3389 * all tx skbs are freed.
3391 netif_tx_disable(netdev);
3392 be_tx_compl_clean(adapter);
3394 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3397 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter))
3399 synchronize_irq(be_msix_vec_get(adapter, eqo));
3400 else
3401 synchronize_irq(netdev->irq);
3402 be_eq_clean(eqo);
3403 }
3404
3405 be_irq_unregister(adapter);
3406
3407 return 0;
3408 }
3410 static int be_rx_qs_create(struct be_adapter *adapter)
3412 struct rss_info *rss = &adapter->rss_info;
3413 u8 rss_key[RSS_HASH_KEY_LEN];
3414 struct be_rx_obj *rxo;
3417 for_all_rx_queues(adapter, rxo, i) {
3418 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3419 sizeof(struct be_eth_rx_d));
3424 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3425 rxo = default_rxo(adapter);
3426 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3427 rx_frag_size, adapter->if_handle,
3428 false, &rxo->rss_id);
3429 if (rc)
3430 return rc;
3431 }
3432
3433 for_all_rss_queues(adapter, rxo, i) {
3434 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3435 rx_frag_size, adapter->if_handle,
3436 true, &rxo->rss_id);
3437 if (rc)
3438 return rc;
3439 }
3440
3441 if (be_multi_rxq(adapter)) {
3442 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3443 for_all_rss_queues(adapter, rxo, i) {
3444 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3445 break;
3446 rss->rsstable[j + i] = rxo->rss_id;
3447 rss->rss_queue[j + i] = i;
3448 }
3449 }
3450 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3451 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3453 if (!BEx_chip(adapter))
3454 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3455 RSS_ENABLE_UDP_IPV6;
3456 } else {
3457 /* Disable RSS, if only default RX Q is created */
3458 rss->rss_flags = RSS_ENABLE_NONE;
3459 }
3460
3461 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3462 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3463 RSS_INDIR_TABLE_LEN, rss_key);
3464 if (rc) {
3465 rss->rss_flags = RSS_ENABLE_NONE;
3466 return rc;
3467 }
3468
3469 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3471 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3472 * which is a queue empty condition
3474 for_all_rx_queues(adapter, rxo, i)
3475 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3476 return 0;
3477 }
3480 static int be_open(struct net_device *netdev)
3482 struct be_adapter *adapter = netdev_priv(netdev);
3483 struct be_eq_obj *eqo;
3484 struct be_rx_obj *rxo;
3485 struct be_tx_obj *txo;
3486 u8 link_status;
3487 int status, i;
3488
3489 status = be_rx_qs_create(adapter);
3490 if (status)
3491 goto err;
3492
3493 status = be_irq_register(adapter);
3494 if (status)
3495 goto err;
3496
3497 for_all_rx_queues(adapter, rxo, i)
3498 be_cq_notify(adapter, rxo->cq.id, true, 0);
3500 for_all_tx_queues(adapter, txo, i)
3501 be_cq_notify(adapter, txo->cq.id, true, 0);
3503 be_async_mcc_enable(adapter);
3505 for_all_evt_queues(adapter, eqo, i) {
3506 napi_enable(&eqo->napi);
3507 be_enable_busy_poll(eqo);
3508 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3510 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3512 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3513 if (!status)
3514 be_link_status_update(adapter, link_status);
3515
3516 netif_tx_start_all_queues(netdev);
3517 be_roce_dev_open(adapter);
3519 #ifdef CONFIG_BE2NET_VXLAN
3520 if (skyhawk_chip(adapter))
3521 vxlan_get_rx_port(netdev);
3522 #endif
3523
3524 return 0;
3525 err:
3526 be_close(adapter->netdev);
3527 return -EIO;
3528 }
3530 static int be_setup_wol(struct be_adapter *adapter, bool enable)
3532 struct device *dev = &adapter->pdev->dev;
3533 struct be_dma_mem cmd;
3539 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3540 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
3541 if (!cmd.va)
3542 return -ENOMEM;
3543
3544 if (enable) {
3545 status = pci_write_config_dword(adapter->pdev,
3546 PCICFG_PM_CONTROL_OFFSET,
3547 PCICFG_PM_CONTROL_MASK);
3549 dev_err(dev, "Could not enable Wake-on-lan\n");
3553 ether_addr_copy(mac, adapter->netdev->dev_addr);
3556 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3557 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3558 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3560 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
3564 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3565 {
3566 u32 addr;
3567
3568 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3569
3570 mac[5] = (u8)(addr & 0xFF);
3571 mac[4] = (u8)((addr >> 8) & 0xFF);
3572 mac[3] = (u8)((addr >> 16) & 0xFF);
3573 /* Use the OUI from the current MAC address */
3574 memcpy(mac, adapter->netdev->dev_addr, 3);
3575 }
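/* Example (editor's note, addresses illustrative): for a PF MAC of
 * 00:90:fa:12:34:56 whose jhash() yields 0x00abcdef, the seed VF MAC is
 * 00:90:fa:ab:cd:ef -- OUI preserved, low 24 bits taken from the hash --
 * and be_vf_eth_addr_config() below hands out seed, seed+1, seed+2, ...
 */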
3577 /*
3578 * Generate a seed MAC address from the PF MAC Address using jhash.
3579 * MAC Address for VFs are assigned incrementally starting from the seed.
3580 * These addresses are programmed in the ASIC by the PF and the VF driver
3581 * queries for the MAC address during its probe.
3583 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3588 struct be_vf_cfg *vf_cfg;
3590 be_vf_eth_addr_generate(adapter, mac);
3592 for_all_vfs(adapter, vf_cfg, vf) {
3593 if (BEx_chip(adapter))
3594 status = be_cmd_pmac_add(adapter, mac,
3596 &vf_cfg->pmac_id, vf + 1);
3598 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3602 dev_err(&adapter->pdev->dev,
3603 "Mac address assignment failed for VF %d\n",
3606 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3613 static int be_vfs_mac_query(struct be_adapter *adapter)
3617 struct be_vf_cfg *vf_cfg;
3619 for_all_vfs(adapter, vf_cfg, vf) {
3620 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3621 mac, vf_cfg->if_handle,
3625 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3630 static void be_vf_clear(struct be_adapter *adapter)
3632 struct be_vf_cfg *vf_cfg;
3635 if (pci_vfs_assigned(adapter->pdev)) {
3636 dev_warn(&adapter->pdev->dev,
3637 "VFs are assigned to VMs: not disabling VFs\n");
3638 goto done;
3639 }
3640
3641 pci_disable_sriov(adapter->pdev);
3643 for_all_vfs(adapter, vf_cfg, vf) {
3644 if (BEx_chip(adapter))
3645 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3646 vf_cfg->pmac_id, vf + 1);
3647 else
3648 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3651 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3652 }
3653 done:
3654 kfree(adapter->vf_cfg);
3655 adapter->num_vfs = 0;
3656 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3657 }
3659 static void be_clear_queues(struct be_adapter *adapter)
3661 be_mcc_queues_destroy(adapter);
3662 be_rx_cqs_destroy(adapter);
3663 be_tx_queues_destroy(adapter);
3664 be_evt_queues_destroy(adapter);
3667 static void be_cancel_worker(struct be_adapter *adapter)
3669 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3670 cancel_delayed_work_sync(&adapter->work);
3671 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3675 static void be_cancel_err_detection(struct be_adapter *adapter)
3677 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3678 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3679 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3683 static void be_mac_clear(struct be_adapter *adapter)
3685 if (adapter->pmac_id) {
3686 be_cmd_pmac_del(adapter, adapter->if_handle,
3687 adapter->pmac_id[0], 0);
3688 kfree(adapter->pmac_id);
3689 adapter->pmac_id = NULL;
3693 #ifdef CONFIG_BE2NET_VXLAN
3694 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3696 struct net_device *netdev = adapter->netdev;
3698 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3699 be_cmd_manage_iface(adapter, adapter->if_handle,
3700 OP_CONVERT_TUNNEL_TO_NORMAL);
3702 if (adapter->vxlan_port)
3703 be_cmd_set_vxlan_port(adapter, 0);
3705 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3706 adapter->vxlan_port = 0;
3708 netdev->hw_enc_features = 0;
3709 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3710 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3714 static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3716 struct be_resources res = adapter->pool_res;
3719 /* Distribute the queue resources equally among the PF and its VFs
3720 * Do not distribute queue resources in multi-channel configuration.
3722 if (num_vfs && !be_is_mc(adapter)) {
3723 /* If number of VFs requested is 8 less than max supported,
3724 * assign 8 queue pairs to the PF and divide the remaining
3725 * resources evenly among the VFs
3727 if (num_vfs < (be_max_vfs(adapter) - 8))
3728 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3729 else
3730 num_vf_qs = res.max_rss_qs / num_vfs;
3732 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3733 * interfaces per port. Provide RSS on VFs, only if number
3734 * of VFs requested is less than MAX_RSS_IFACES limit.
3736 if (num_vfs >= MAX_RSS_IFACES)
3737 num_vf_qs = 1;
3738 }
3739 return num_vf_qs;
3740 }
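/* Worked example (editor's note, limits assumed): with max_rss_qs = 32 and
 * be_max_vfs() = 32, a request for 8 VFs takes the first branch
 * (8 < 32 - 8), so the PF keeps 8 queue pairs and each VF gets
 * (32 - 8) / 8 = 3 RSS queues; requests at or beyond MAX_RSS_IFACES fall
 * back to a single queue per VF.
 */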
3742 static int be_clear(struct be_adapter *adapter)
3744 struct pci_dev *pdev = adapter->pdev;
3747 be_cancel_worker(adapter);
3749 if (sriov_enabled(adapter))
3750 be_vf_clear(adapter);
3752 /* Re-configure FW to distribute resources evenly across max-supported
3753 * number of VFs, only when VFs are not already enabled.
3755 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3756 !pci_vfs_assigned(pdev)) {
3757 num_vf_qs = be_calculate_vf_qs(adapter,
3758 pci_sriov_get_totalvfs(pdev));
3759 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3760 pci_sriov_get_totalvfs(pdev),
3764 #ifdef CONFIG_BE2NET_VXLAN
3765 be_disable_vxlan_offloads(adapter);
3767 /* delete the primary mac along with the uc-mac list */
3768 be_mac_clear(adapter);
3770 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3772 be_clear_queues(adapter);
3774 be_msix_disable(adapter);
3775 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3779 static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3780 u32 cap_flags, u32 vf)
3784 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3785 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3786 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3788 en_flags &= cap_flags;
3790 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3793 static int be_vfs_if_create(struct be_adapter *adapter)
3795 struct be_resources res = {0};
3796 struct be_vf_cfg *vf_cfg;
3800 /* If a FW profile exists, then cap_flags are updated */
3801 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3802 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3804 for_all_vfs(adapter, vf_cfg, vf) {
3805 if (!BE3_chip(adapter)) {
3806 status = be_cmd_get_profile_config(adapter, &res,
3810 cap_flags = res.if_cap_flags;
3811 /* Prevent VFs from enabling VLAN promiscuous
3814 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3818 status = be_if_create(adapter, &vf_cfg->if_handle,
3827 static int be_vf_setup_init(struct be_adapter *adapter)
3829 struct be_vf_cfg *vf_cfg;
3832 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3833 GFP_KERNEL);
3834 if (!adapter->vf_cfg)
3835 return -ENOMEM;
3836
3837 for_all_vfs(adapter, vf_cfg, vf) {
3838 vf_cfg->if_handle = -1;
3839 vf_cfg->pmac_id = -1;
3840 }
3841 return 0;
3842 }
3844 static int be_vf_setup(struct be_adapter *adapter)
3846 struct device *dev = &adapter->pdev->dev;
3847 struct be_vf_cfg *vf_cfg;
3848 int status, old_vfs, vf;
3851 old_vfs = pci_num_vf(adapter->pdev);
3853 status = be_vf_setup_init(adapter);
3858 for_all_vfs(adapter, vf_cfg, vf) {
3859 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3864 status = be_vfs_mac_query(adapter);
3868 status = be_vfs_if_create(adapter);
3872 status = be_vf_eth_addr_config(adapter);
3877 for_all_vfs(adapter, vf_cfg, vf) {
3878 /* Allow VFs to program MAC/VLAN filters */
3879 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3880 vf + 1);
3881 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3882 status = be_cmd_set_fn_privileges(adapter,
3883 vf_cfg->privileges |
3884 BE_PRIV_FILTMGMT,
3885 vf + 1);
3886 if (!status) {
3887 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3888 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3889 vf);
3890 }
3891 }
3893 /* Allow full available bandwidth */
3895 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3897 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3898 vf_cfg->if_handle, NULL,
3901 vf_cfg->spoofchk = spoofchk;
3904 be_cmd_enable_vf(adapter, vf + 1);
3905 be_cmd_set_logical_link_config(adapter,
3906 IFLA_VF_LINK_STATE_AUTO,
3912 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3914 dev_err(dev, "SRIOV enable failed\n");
3915 adapter->num_vfs = 0;
3920 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3923 dev_err(dev, "VF setup failed\n");
3924 be_vf_clear(adapter);
3928 /* Converting function_mode bits on BE3 to SH mc_type enums */
3930 static u8 be_convert_mc_type(u32 function_mode)
3932 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3933 return vNIC1;
3934 else if (function_mode & QNQ_MODE)
3935 return FLEX10;
3936 else if (function_mode & VNIC_MODE)
3937 return vNIC2;
3938 else if (function_mode & UMC_ENABLED)
3939 return UMC;
3940 else
3941 return MC_NONE;
3942 }
3944 /* On BE2/BE3 FW does not suggest the supported limits */
3945 static void BEx_get_resources(struct be_adapter *adapter,
3946 struct be_resources *res)
3948 bool use_sriov = adapter->num_vfs ? 1 : 0;
3950 if (be_physfn(adapter))
3951 res->max_uc_mac = BE_UC_PMAC_COUNT;
3953 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3955 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3957 if (be_is_mc(adapter)) {
3958 /* Assuming that there are 4 channels per port,
3959 * when multi-channel is enabled
3961 if (be_is_qnq_mode(adapter))
3962 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3963 else
3964 /* In a non-qnq multichannel mode, the pvid
3965 * takes up one vlan entry
3966 */
3967 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3968 } else {
3969 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3970 }
3971
3972 res->max_mcast_mac = BE_MAX_MC;
3974 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3975 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3976 * *only* if it is RSS-capable.
3978 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3979 be_virtfn(adapter) ||
3980 (be_is_mc(adapter) &&
3981 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
3982 res->max_tx_qs = 1;
3983 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3984 struct be_resources super_nic_res = {0};
3986 /* On a SuperNIC profile, the driver needs to use the
3987 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3989 be_cmd_get_profile_config(adapter, &super_nic_res,
3990 RESOURCE_LIMITS, 0);
3991 /* Some old versions of BE3 FW don't report max_tx_qs value */
3992 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3993 } else {
3994 res->max_tx_qs = BE3_MAX_TX_QS;
3995 }
3996
3997 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3998 !use_sriov && be_physfn(adapter))
3999 res->max_rss_qs = (adapter->be3_native) ?
4000 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4001 res->max_rx_qs = res->max_rss_qs + 1;
4003 if (be_physfn(adapter))
4004 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4005 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4007 res->max_evt_qs = 1;
4009 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4010 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4011 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4012 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4015 static void be_setup_init(struct be_adapter *adapter)
4017 adapter->vlan_prio_bmap = 0xff;
4018 adapter->phy.link_speed = -1;
4019 adapter->if_handle = -1;
4020 adapter->be3_native = false;
4021 adapter->if_flags = 0;
4022 if (be_physfn(adapter))
4023 adapter->cmd_privileges = MAX_PRIVILEGES;
4025 adapter->cmd_privileges = MIN_PRIVILEGES;
4028 static int be_get_sriov_config(struct be_adapter *adapter)
4030 struct be_resources res = {0};
4031 int max_vfs, old_vfs;
4033 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
4035 /* Some old versions of BE3 FW don't report max_vfs value */
4036 if (BE3_chip(adapter) && !res.max_vfs) {
4037 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4038 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4041 adapter->pool_res = res;
4043 /* If during previous unload of the driver, the VFs were not disabled,
4044 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4045 * Instead use the TotalVFs value stored in the pci-dev struct.
4047 old_vfs = pci_num_vf(adapter->pdev);
4049 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4052 adapter->pool_res.max_vfs =
4053 pci_sriov_get_totalvfs(adapter->pdev);
4054 adapter->num_vfs = old_vfs;
4060 static void be_alloc_sriov_res(struct be_adapter *adapter)
4062 int old_vfs = pci_num_vf(adapter->pdev);
4066 be_get_sriov_config(adapter);
4069 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4071 /* When the HW is in SRIOV capable configuration, the PF-pool
4072 * resources are given to PF during driver load, if there are no
4073 * old VFs. This facility is not available in BE3 FW.
4074 * Also, this is done by FW in Lancer chip.
4076 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4077 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4078 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4081 dev_err(&adapter->pdev->dev,
4082 "Failed to optimize SRIOV resources\n");
4086 static int be_get_resources(struct be_adapter *adapter)
4088 struct device *dev = &adapter->pdev->dev;
4089 struct be_resources res = {0};
4092 if (BEx_chip(adapter)) {
4093 BEx_get_resources(adapter, &res);
4097 /* For Lancer, SH etc read per-function resource limits from FW.
4098 * GET_FUNC_CONFIG returns per function guaranteed limits.
4099 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4101 if (!BEx_chip(adapter)) {
4102 status = be_cmd_get_func_config(adapter, &res);
4103 if (status)
4104 return status;
4105
4106 /* If a default RXQ must be created, we'll use up one RSSQ */
4107 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4108 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4109 res.max_rss_qs -= 1;
4111 /* If RoCE may be enabled stash away half the EQs for RoCE */
4112 if (be_roce_supported(adapter))
4113 res.max_evt_qs /= 2;
4117 /* If FW supports RSS default queue, then skip creating non-RSS
4118 * queue for non-IP traffic.
4120 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4121 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4123 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4124 be_max_txqs(adapter), be_max_rxqs(adapter),
4125 be_max_rss(adapter), be_max_eqs(adapter),
4126 be_max_vfs(adapter));
4127 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4128 be_max_uc(adapter), be_max_mc(adapter),
4129 be_max_vlans(adapter));
4131 /* Sanitize cfg_num_qs based on HW and platform limits */
4132 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4133 be_max_qs(adapter));
4137 static int be_get_config(struct be_adapter *adapter)
4142 status = be_cmd_get_cntl_attributes(adapter);
4143 if (status)
4144 return status;
4145
4146 status = be_cmd_query_fw_cfg(adapter);
4147 if (status)
4148 return status;
4149
4150 if (BEx_chip(adapter)) {
4151 level = be_cmd_get_fw_log_level(adapter);
4152 adapter->msg_enable =
4153 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4156 be_cmd_get_acpi_wol_cap(adapter);
4158 be_cmd_query_port_name(adapter);
4160 if (be_physfn(adapter)) {
4161 status = be_cmd_get_active_profile(adapter, &profile_id);
4163 dev_info(&adapter->pdev->dev,
4164 "Using profile 0x%x\n", profile_id);
4167 status = be_get_resources(adapter);
4168 if (status)
4169 return status;
4170
4171 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4172 sizeof(*adapter->pmac_id), GFP_KERNEL);
4173 if (!adapter->pmac_id)
4179 static int be_mac_setup(struct be_adapter *adapter)
4184 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4185 status = be_cmd_get_perm_mac(adapter, mac);
4186 if (status)
4187 return status;
4188
4189 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4190 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4191 } else {
4192 /* Maybe the HW was reset; dev_addr must be re-programmed */
4193 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4194 }
4195
4196 /* For BE3-R VFs, the PF programs the initial MAC address */
4197 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4198 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4199 &adapter->pmac_id[0], 0);
4203 static void be_schedule_worker(struct be_adapter *adapter)
4205 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4206 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4209 static void be_schedule_err_detection(struct be_adapter *adapter)
4211 schedule_delayed_work(&adapter->be_err_detection_work,
4212 msecs_to_jiffies(1000));
4213 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4216 static int be_setup_queues(struct be_adapter *adapter)
4218 struct net_device *netdev = adapter->netdev;
4221 status = be_evt_queues_create(adapter);
4222 if (status)
4223 goto err;
4224
4225 status = be_tx_qs_create(adapter);
4226 if (status)
4227 goto err;
4228
4229 status = be_rx_cqs_create(adapter);
4230 if (status)
4231 goto err;
4232
4233 status = be_mcc_queues_create(adapter);
4234 if (status)
4235 goto err;
4236
4237 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4238 if (status)
4239 goto err;
4240
4241 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4242 if (status)
4243 goto err;
4244
4245 return 0;
4246 err:
4247 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4248 return status;
4249 }
4251 int be_update_queues(struct be_adapter *adapter)
4253 struct net_device *netdev = adapter->netdev;
4256 if (netif_running(netdev))
4257 be_close(netdev);
4258
4259 be_cancel_worker(adapter);
4261 /* If any vectors have been shared with RoCE we cannot re-program
4264 if (!adapter->num_msix_roce_vec)
4265 be_msix_disable(adapter);
4267 be_clear_queues(adapter);
4269 if (!msix_enabled(adapter)) {
4270 status = be_msix_enable(adapter);
4271 if (status)
4272 return status;
4273 }
4274
4275 status = be_setup_queues(adapter);
4276 if (status)
4277 return status;
4278
4279 be_schedule_worker(adapter);
4281 if (netif_running(netdev))
4282 status = be_open(netdev);
4283
4284 return status;
4285 }
4287 static inline int fw_major_num(const char *fw_ver)
4288 {
4289 int fw_major = 0, i;
4290
4291 i = sscanf(fw_ver, "%d.", &fw_major);
4292 if (i != 1)
4293 return 0;
4294
4295 return fw_major;
4296 }
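/* Parsing example (editor's note): fw_major_num("10.6.0.2") returns 10;
 * the sscanf() match-count check above returns 0 for version strings that
 * do not begin with a number.
 */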
4298 /* If any VFs are already enabled don't FLR the PF */
4299 static bool be_reset_required(struct be_adapter *adapter)
4301 return pci_num_vf(adapter->pdev) ? false : true;
4304 /* Wait for the FW to be ready and perform the required initialization */
4305 static int be_func_init(struct be_adapter *adapter)
4309 status = be_fw_wait_ready(adapter);
4310 if (status)
4311 return status;
4312
4313 if (be_reset_required(adapter)) {
4314 status = be_cmd_reset_function(adapter);
4315 if (status)
4316 return status;
4317
4318 /* Wait for interrupts to quiesce after an FLR */
4319 msleep(100);
4320
4321 /* We can clear all errors when function reset succeeds */
4322 be_clear_error(adapter, BE_CLEAR_ALL);
4323 }
4325 /* Tell FW we're ready to fire cmds */
4326 status = be_cmd_fw_init(adapter);
4327 if (status)
4328 return status;
4329
4330 /* Allow interrupts for other ULPs running on NIC function */
4331 be_intr_set(adapter, true);
4332
4333 return 0;
4334 }
4336 static int be_setup(struct be_adapter *adapter)
4338 struct device *dev = &adapter->pdev->dev;
4341 status = be_func_init(adapter);
4342 if (status)
4343 return status;
4344
4345 be_setup_init(adapter);
4347 if (!lancer_chip(adapter))
4348 be_cmd_req_native_mode(adapter);
4350 if (!BE2_chip(adapter) && be_physfn(adapter))
4351 be_alloc_sriov_res(adapter);
4353 status = be_get_config(adapter);
4354 if (status)
4355 goto err;
4356
4357 status = be_msix_enable(adapter);
4358 if (status)
4359 goto err;
4360
4361 status = be_if_create(adapter, &adapter->if_handle,
4362 be_if_cap_flags(adapter), 0);
4363 if (status)
4364 goto err;
4365
4366 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4367 rtnl_lock();
4368 status = be_setup_queues(adapter);
4369 rtnl_unlock();
4370 if (status)
4371 goto err;
4372
4373 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4375 status = be_mac_setup(adapter);
4376 if (status)
4377 goto err;
4378
4379 be_cmd_get_fw_ver(adapter);
4380 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4382 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4383 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
4385 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4388 if (adapter->vlans_added)
4389 be_vid_config(adapter);
4391 be_set_rx_mode(adapter->netdev);
4393 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4394 adapter->rx_fc);
4395 if (status)
4396 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4397 &adapter->rx_fc);
4398
4399 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4400 adapter->tx_fc, adapter->rx_fc);
4402 if (be_physfn(adapter))
4403 be_cmd_set_logical_link_config(adapter,
4404 IFLA_VF_LINK_STATE_AUTO, 0);
4406 if (adapter->num_vfs)
4407 be_vf_setup(adapter);
4409 status = be_cmd_get_phy_info(adapter);
4410 if (!status && be_pause_supported(adapter))
4411 adapter->phy.fc_autoneg = 1;
4413 be_schedule_worker(adapter);
4414 adapter->flags |= BE_FLAGS_SETUP_DONE;
4415 return 0;
4416 err:
4417 be_clear(adapter);
4418 return status;
4419 }
4421 #ifdef CONFIG_NET_POLL_CONTROLLER
4422 static void be_netpoll(struct net_device *netdev)
4424 struct be_adapter *adapter = netdev_priv(netdev);
4425 struct be_eq_obj *eqo;
4428 for_all_evt_queues(adapter, eqo, i) {
4429 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4430 napi_schedule(&eqo->napi);
4435 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
4437 static bool phy_flashing_required(struct be_adapter *adapter)
4439 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
4440 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4443 static bool is_comp_in_ufi(struct be_adapter *adapter,
4444 struct flash_section_info *fsec, int type)
4446 int i = 0, img_type = 0;
4447 struct flash_section_info_g2 *fsec_g2 = NULL;
4449 if (BE2_chip(adapter))
4450 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4452 for (i = 0; i < MAX_FLASH_COMP; i++) {
4454 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4456 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4458 if (img_type == type)
4465 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4467 const struct firmware *fw)
4469 struct flash_section_info *fsec = NULL;
4470 const u8 *p = fw->data;
4473 while (p < (fw->data + fw->size)) {
4474 fsec = (struct flash_section_info *)p;
4475 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4482 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4483 u32 img_offset, u32 img_size, int hdr_size,
4484 u16 img_optype, bool *crc_match)
4490 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4491 img_size - 4);
4492 if (status)
4493 return status;
4494
4495 crc_offset = hdr_size + img_offset + img_size - 4;
4496
4497 /* Skip flashing, if crc of flashed region matches */
4498 if (!memcmp(crc, p + crc_offset, 4))
4499 *crc_match = true;
4500 else
4501 *crc_match = false;
4502
4503 return status;
4504 }
4506 static int be_flash(struct be_adapter *adapter, const u8 *img,
4507 struct be_dma_mem *flash_cmd, int optype, int img_size,
4510 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
4511 struct be_cmd_write_flashrom *req = flash_cmd->va;
4514 while (total_bytes) {
4515 num_bytes = min_t(u32, 32*1024, total_bytes);
4517 total_bytes -= num_bytes;
4518
4519 if (!total_bytes) {
4520 if (optype == OPTYPE_PHY_FW)
4521 flash_op = FLASHROM_OPER_PHY_FLASH;
4522 else
4523 flash_op = FLASHROM_OPER_FLASH;
4524 } else {
4525 if (optype == OPTYPE_PHY_FW)
4526 flash_op = FLASHROM_OPER_PHY_SAVE;
4527 else
4528 flash_op = FLASHROM_OPER_SAVE;
4529 }
4530
4531 memcpy(req->data_buf, img, num_bytes);
4532 img += num_bytes;
4533 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4534 flash_op, img_offset +
4535 bytes_sent, num_bytes);
4536 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4537 optype == OPTYPE_PHY_FW)
4538 break;
4539 else if (status)
4540 return status;
4541
4542 bytes_sent += num_bytes;
4543 }
4544 return 0;
4545 }
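/* Chunking example (editor's note): a 100KB image is staged as four
 * write_flashrom commands of 32KB, 32KB, 32KB and 4KB; only the final
 * chunk (total_bytes == 0) uses the FLASH opcode, the earlier ones use
 * SAVE, so nothing is committed until the whole image has been sent.
 */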
4547 /* For BE2, BE3 and BE3-R */
4548 static int be_flash_BEx(struct be_adapter *adapter,
4549 const struct firmware *fw,
4550 struct be_dma_mem *flash_cmd, int num_of_images)
4552 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
4553 struct device *dev = &adapter->pdev->dev;
4554 struct flash_section_info *fsec = NULL;
4555 int status, i, filehdr_size, num_comp;
4556 const struct flash_comp *pflashcomp;
4560 struct flash_comp gen3_flash_types[] = {
4561 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4562 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4563 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4564 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4565 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4566 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4567 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4568 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4569 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4570 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4571 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4572 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4573 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4574 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4575 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4576 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4577 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4578 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4579 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4580 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
4583 struct flash_comp gen2_flash_types[] = {
4584 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4585 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4586 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4587 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4588 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4589 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4590 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4591 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4592 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4593 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4594 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4595 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4596 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4597 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4598 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4599 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4600 };
4602 if (BE3_chip(adapter)) {
4603 pflashcomp = gen3_flash_types;
4604 filehdr_size = sizeof(struct flash_file_hdr_g3);
4605 num_comp = ARRAY_SIZE(gen3_flash_types);
4606 } else {
4607 pflashcomp = gen2_flash_types;
4608 filehdr_size = sizeof(struct flash_file_hdr_g2);
4609 num_comp = ARRAY_SIZE(gen2_flash_types);
4610 }
4613 /* Get flash section info*/
4614 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4615 if (!fsec) {
4616 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4617 return -1;
4618 }
4619 for (i = 0; i < num_comp; i++) {
4620 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
4621 continue;
4623 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4624 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4625 continue;
4627 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4628 !phy_flashing_required(adapter))
4629 continue;
4631 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4632 status = be_check_flash_crc(adapter, fw->data,
4633 pflashcomp[i].offset,
4634 pflashcomp[i].size,
4635 filehdr_size +
4636 img_hdrs_size,
4637 OPTYPE_REDBOOT, &crc_match);
4638 if (status) {
4639 dev_err(dev,
4640 "Could not get CRC for 0x%x region\n",
4641 pflashcomp[i].optype);
4642 continue;
4643 }
4645 if (crc_match)
4646 continue;
4647 }
4649 p = fw->data + filehdr_size + pflashcomp[i].offset +
4650 img_hdrs_size;
4651 if (p + pflashcomp[i].size > fw->data + fw->size)
4652 return -1;
4654 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
4655 pflashcomp[i].size, 0);
4656 if (status) {
4657 dev_err(dev, "Flashing section type 0x%x failed\n",
4658 pflashcomp[i].img_type);
4659 return status;
4660 }
4661 }
4663 return 0;
4664 }
4665 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4666 {
4667 u32 img_type = le32_to_cpu(fsec_entry.type);
4668 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4670 if (img_optype != 0xFFFF)
4671 return img_optype;
4673 switch (img_type) {
4674 case IMAGE_FIRMWARE_iSCSI:
4675 img_optype = OPTYPE_ISCSI_ACTIVE;
4676 break;
4677 case IMAGE_BOOT_CODE:
4678 img_optype = OPTYPE_REDBOOT;
4679 break;
4680 case IMAGE_OPTION_ROM_ISCSI:
4681 img_optype = OPTYPE_BIOS;
4682 break;
4683 case IMAGE_OPTION_ROM_PXE:
4684 img_optype = OPTYPE_PXE_BIOS;
4685 break;
4686 case IMAGE_OPTION_ROM_FCoE:
4687 img_optype = OPTYPE_FCOE_BIOS;
4688 break;
4689 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4690 img_optype = OPTYPE_ISCSI_BACKUP;
4691 break;
4692 case IMAGE_NCSI:
4693 img_optype = OPTYPE_NCSI_FW;
4694 break;
4695 case IMAGE_FLASHISM_JUMPVECTOR:
4696 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4697 break;
4698 case IMAGE_FIRMWARE_PHY:
4699 img_optype = OPTYPE_SH_PHY_FW;
4700 break;
4701 case IMAGE_REDBOOT_DIR:
4702 img_optype = OPTYPE_REDBOOT_DIR;
4703 break;
4704 case IMAGE_REDBOOT_CONFIG:
4705 img_optype = OPTYPE_REDBOOT_CONFIG;
4706 break;
4707 case IMAGE_UFI_DIR:
4708 img_optype = OPTYPE_UFI_DIR;
4709 break;
4710 default:
4711 break;
4712 }
4714 return img_optype;
4715 }
4717 static int be_flash_skyhawk(struct be_adapter *adapter,
4718 const struct firmware *fw,
4719 struct be_dma_mem *flash_cmd, int num_of_images)
4720 {
4721 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4722 bool crc_match, old_fw_img, flash_offset_support = true;
4723 struct device *dev = &adapter->pdev->dev;
4724 struct flash_section_info *fsec = NULL;
4725 u32 img_offset, img_size, img_type;
4726 u16 img_optype, flash_optype;
4727 int status, i, filehdr_size;
4728 const u8 *p;
4730 filehdr_size = sizeof(struct flash_file_hdr_g3);
4731 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4732 if (!fsec) {
4733 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4734 return -1;
4735 }
4738 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4739 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4740 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4741 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4742 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4743 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
4745 if (img_optype == 0xFFFF)
4746 continue;
4747 retry_flash:
4748 if (flash_offset_support)
4749 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4750 else
4751 flash_optype = img_optype;
4753 /* Don't bother verifying CRC if an old FW image is being
4754 * flashed
4755 */
4756 if (old_fw_img)
4757 goto flash;
4759 status = be_check_flash_crc(adapter, fw->data, img_offset,
4760 img_size, filehdr_size +
4761 img_hdrs_size, flash_optype,
4762 &crc_match);
4763 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4764 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4765 /* The current FW image on the card does not support
4766 * OFFSET based flashing. Retry using older mechanism
4767 * of OPTYPE based flashing
4768 */
4769 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4770 flash_offset_support = false;
4771 goto retry_flash;
4772 }
4774 /* The current FW image on the card does not recognize
4775 * the new FLASH op_type. The FW download is partially
4776 * complete. Reboot the server now to enable FW image
4777 * to recognize the new FLASH op_type. To complete the
4778 * remaining process, download the same FW again after
4779 * the reboot.
4780 */
4781 dev_err(dev, "Flash incomplete. Reset the server\n");
4782 dev_err(dev, "Download FW image again after reset\n");
4783 return -EAGAIN;
4784 } else if (status) {
4785 dev_err(dev, "Could not get CRC for 0x%x region\n",
4786 img_optype);
4787 return -EFAULT;
4788 }
4790 if (crc_match)
4791 continue;
4793 flash:
4794 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4795 if (p + img_size > fw->data + fw->size)
4796 return -1;
4798 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4799 img_offset);
4801 /* The current FW image on the card does not support OFFSET
4802 * based flashing. Retry using older mechanism of OPTYPE based
4803 * flashing
4804 */
4805 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4806 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4807 flash_offset_support = false;
4808 goto retry_flash;
4809 }
4811 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4812 * UFI_DIR region
4813 */
4814 if (old_fw_img &&
4815 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4816 (img_optype == OPTYPE_UFI_DIR &&
4817 base_status(status) == MCC_STATUS_FAILED))) {
4818 continue;
4819 } else if (status) {
4820 dev_err(dev, "Flashing section type 0x%x failed\n",
4821 img_type);
4822 return -EFAULT;
4823 }
4824 }
4826 return 0;
4827 }
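/* Illustrative note: be_flash_skyhawk() prefers the newer
 * OPTYPE_OFFSET_SPECIFIED mechanism, where the flash destination is
 * given as a byte offset. If the firmware already on the card rejects
 * that (ILLEGAL_REQUEST/ILLEGAL_FIELD), the code jumps back to
 * retry_flash and re-issues the same section using the older
 * per-section optype mechanism.
 */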
4828 static int lancer_fw_download(struct be_adapter *adapter,
4829 const struct firmware *fw)
4830 {
4831 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4832 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
4833 struct device *dev = &adapter->pdev->dev;
4834 struct be_dma_mem flash_cmd;
4835 const u8 *data_ptr = NULL;
4836 u8 *dest_image_ptr = NULL;
4837 size_t image_size = 0;
4838 u32 chunk_size = 0;
4839 u32 data_written = 0;
4840 u32 offset = 0;
4841 int status = 0;
4842 u8 add_status = 0;
4843 u8 change_status;
4845 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4846 dev_err(dev, "FW image size should be multiple of 4\n");
4847 return -EINVAL;
4848 }
4850 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4851 + LANCER_FW_DOWNLOAD_CHUNK;
4852 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4853 &flash_cmd.dma, GFP_KERNEL);
4854 if (!flash_cmd.va)
4855 return -ENOMEM;
4857 dest_image_ptr = flash_cmd.va +
4858 sizeof(struct lancer_cmd_req_write_object);
4859 image_size = fw->size;
4860 data_ptr = fw->data;
4862 while (image_size) {
4863 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4865 /* Copy the image chunk content. */
4866 memcpy(dest_image_ptr, data_ptr, chunk_size);
4868 status = lancer_cmd_write_object(adapter, &flash_cmd,
4869 chunk_size, offset,
4870 LANCER_FW_DOWNLOAD_LOCATION,
4871 &data_written, &change_status,
4872 &add_status);
4873 if (status)
4874 break;
4876 offset += data_written;
4877 data_ptr += data_written;
4878 image_size -= data_written;
4879 }
4881 if (!status) {
4882 /* Commit the FW written */
4883 status = lancer_cmd_write_object(adapter, &flash_cmd,
4884 0, offset,
4885 LANCER_FW_DOWNLOAD_LOCATION,
4886 &data_written, &change_status,
4887 &add_status);
4888 }
4890 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4891 if (status) {
4892 dev_err(dev, "Firmware load error\n");
4893 return be_cmd_status(status);
4894 }
4896 dev_info(dev, "Firmware flashed successfully\n");
4898 if (change_status == LANCER_FW_RESET_NEEDED) {
4899 dev_info(dev, "Resetting adapter to activate new FW\n");
4900 status = lancer_physdev_ctrl(adapter,
4901 PHYSDEV_CONTROL_FW_RESET_MASK);
4902 if (status) {
4903 dev_err(dev, "Adapter busy, could not reset FW\n");
4904 dev_err(dev, "Reboot server to activate new FW\n");
4905 }
4906 } else if (change_status != LANCER_NO_RESET_NEEDED) {
4907 dev_info(dev, "Reboot server to activate new FW\n");
4908 }
4910 return 0;
4911 }
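/* Illustrative note: the Lancer download above is a two-phase protocol.
 * The image is streamed in 32KB write-object commands to the "/prg"
 * object, and a final zero-length write commits it. change_status then
 * tells the driver whether a host-initiated FW reset is enough to
 * activate the new image or a full server reboot is required.
 */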
4913 /* Check if the flash image file is compatible with the adapter that
4914 * is being flashed.
4915 */
4916 static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4917 struct flash_file_hdr_g3 *fhdr)
4918 {
4919 if (!fhdr) {
4920 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4921 return false;
4922 }
4924 /* First letter of the build version is used to identify
4925 * which chip this image file is meant for.
4926 */
4927 switch (fhdr->build[0]) {
4928 case BLD_STR_UFI_TYPE_SH:
4929 if (!skyhawk_chip(adapter))
4930 return false;
4931 break;
4932 case BLD_STR_UFI_TYPE_BE3:
4933 if (!BE3_chip(adapter))
4934 return false;
4935 break;
4936 case BLD_STR_UFI_TYPE_BE2:
4937 if (!BE2_chip(adapter))
4938 return false;
4939 break;
4940 default:
4941 return false;
4942 }
4944 return (fhdr->asic_type_rev >= adapter->asic_rev);
4945 }
4947 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4948 {
4949 struct device *dev = &adapter->pdev->dev;
4950 struct flash_file_hdr_g3 *fhdr3;
4951 struct image_hdr *img_hdr_ptr;
4952 int status = 0, i, num_imgs;
4953 struct be_dma_mem flash_cmd;
4955 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4956 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4957 dev_err(dev, "Flash image is not compatible with adapter\n");
4958 return -EINVAL;
4959 }
4961 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4962 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4963 GFP_KERNEL);
4964 if (!flash_cmd.va)
4965 return -ENOMEM;
4967 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4968 for (i = 0; i < num_imgs; i++) {
4969 img_hdr_ptr = (struct image_hdr *)(fw->data +
4970 (sizeof(struct flash_file_hdr_g3) +
4971 i * sizeof(struct image_hdr)));
4972 if (!BE2_chip(adapter) &&
4973 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4974 continue;
4976 if (skyhawk_chip(adapter))
4977 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4978 num_imgs);
4979 else
4980 status = be_flash_BEx(adapter, fw, &flash_cmd,
4981 num_imgs);
4982 }
4984 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4985 if (!status)
4986 dev_info(dev, "Firmware flashed successfully\n");
4988 return status;
4989 }
4991 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4992 {
4993 const struct firmware *fw;
4994 int status;
4996 if (!netif_running(adapter->netdev)) {
4997 dev_err(&adapter->pdev->dev,
4998 "Firmware load not allowed (interface is down)\n");
4999 return -ENETDOWN;
5000 }
5002 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5003 if (status)
5004 goto fw_exit;
5006 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5008 if (lancer_chip(adapter))
5009 status = lancer_fw_download(adapter, fw);
5010 else
5011 status = be_fw_download(adapter, fw);
5013 if (!status)
5014 be_cmd_get_fw_ver(adapter);
5016 fw_exit:
5017 release_firmware(fw);
5018 return status;
5019 }
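/* Illustrative note: be_load_fw() is normally reached via the driver's
 * ethtool flash hook, e.g.:
 *   ethtool -f eth0 imagename.ufi
 * where "eth0" and "imagename.ufi" are placeholders for the be2net
 * interface and a firmware image resolvable by request_firmware()
 * (typically under /lib/firmware).
 */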
5021 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
5022 u16 flags)
5023 {
5024 struct be_adapter *adapter = netdev_priv(dev);
5025 struct nlattr *attr, *br_spec;
5026 int rem;
5027 int status = 0;
5028 u16 mode = 0;
5030 if (!sriov_enabled(adapter))
5031 return -EOPNOTSUPP;
5033 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5034 if (!br_spec)
5035 return -EINVAL;
5037 nla_for_each_nested(attr, br_spec, rem) {
5038 if (nla_type(attr) != IFLA_BRIDGE_MODE)
5039 continue;
5041 if (nla_len(attr) < sizeof(mode))
5042 return -EINVAL;
5044 mode = nla_get_u16(attr);
5045 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
5046 return -EOPNOTSUPP;
5048 status = be_cmd_set_hsw_config(adapter, 0, 0,
5049 adapter->if_handle,
5050 mode == BRIDGE_MODE_VEPA ?
5051 PORT_FWD_TYPE_VEPA :
5052 PORT_FWD_TYPE_VEB, 0);
5053 if (status)
5054 goto err;
5056 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5057 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5059 return status;
5060 }
5061 err:
5062 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5063 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5065 return status;
5066 }
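/* Illustrative note: these ndo_bridge_*link hooks back the standard
 * iproute2 bridge command, e.g.:
 *   bridge link set dev eth0 hwmode vepa   (or "veb")
 *   bridge link show dev eth0
 * "eth0" is a placeholder; the setlink path requires SR-IOV to be
 * enabled, as checked above.
 */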
5068 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5069 struct net_device *dev, u32 filter_mask,
5070 int nlflags)
5071 {
5072 struct be_adapter *adapter = netdev_priv(dev);
5073 int status = 0;
5074 u8 hsw_mode;
5076 /* BE and Lancer chips support VEB mode only */
5077 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5078 hsw_mode = PORT_FWD_TYPE_VEB;
5079 } else {
5080 status = be_cmd_get_hsw_config(adapter, NULL, 0,
5081 adapter->if_handle, &hsw_mode,
5082 NULL);
5083 if (status)
5084 return 0;
5086 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5087 return 0;
5088 }
5090 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5091 hsw_mode == PORT_FWD_TYPE_VEPA ?
5092 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
5093 0, 0, nlflags, filter_mask, NULL);
5094 }
5096 #ifdef CONFIG_BE2NET_VXLAN
5097 /* VxLAN offload Notes:
5099 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5100 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5101 * is expected to work across all types of IP tunnels once exported. Skyhawk
5102 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
5103 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5104 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5105 * those other tunnels are unexported on the fly through ndo_features_check().
5107 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5108 * adds more than one port, disable offloads and don't re-enable them again
5109 * until after all the tunnels are removed.
5110 */
5111 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5112 __be16 port)
5113 {
5114 struct be_adapter *adapter = netdev_priv(netdev);
5115 struct device *dev = &adapter->pdev->dev;
5116 int status;
5118 if (lancer_chip(adapter) || BEx_chip(adapter))
5119 return;
5121 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
5122 dev_info(dev,
5123 "Only one UDP port supported for VxLAN offloads\n");
5124 dev_info(dev, "Disabling VxLAN offloads\n");
5125 adapter->vxlan_port_count++;
5126 goto err;
5127 }
5129 if (adapter->vxlan_port_count++ >= 1)
5130 return;
5132 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5133 OP_CONVERT_NORMAL_TO_TUNNEL);
5134 if (status) {
5135 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5136 goto err;
5137 }
5139 status = be_cmd_set_vxlan_port(adapter, port);
5140 if (status) {
5141 dev_warn(dev, "Failed to add VxLAN port\n");
5142 goto err;
5143 }
5144 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5145 adapter->vxlan_port = port;
5147 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5148 NETIF_F_TSO | NETIF_F_TSO6 |
5149 NETIF_F_GSO_UDP_TUNNEL;
5150 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
5151 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
5153 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5154 be16_to_cpu(port));
5155 return;
5156 err:
5157 be_disable_vxlan_offloads(adapter);
5158 }
5160 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5161 __be16 port)
5162 {
5163 struct be_adapter *adapter = netdev_priv(netdev);
5165 if (lancer_chip(adapter) || BEx_chip(adapter))
5166 return;
5168 if (adapter->vxlan_port != port)
5169 goto done;
5171 be_disable_vxlan_offloads(adapter);
5173 dev_info(&adapter->pdev->dev,
5174 "Disabled VxLAN offloads for UDP port %d\n",
5175 be16_to_cpu(port));
5176 done:
5177 adapter->vxlan_port_count--;
5178 }
5180 static netdev_features_t be_features_check(struct sk_buff *skb,
5181 struct net_device *dev,
5182 netdev_features_t features)
5183 {
5184 struct be_adapter *adapter = netdev_priv(dev);
5185 u8 l4_hdr = 0;
5187 /* The code below restricts offload features for some tunneled packets.
5188 * Offload features for normal (non tunnel) packets are unchanged.
5189 */
5190 if (!skb->encapsulation ||
5191 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5192 return features;
5194 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5195 * should disable tunnel offload features if it's not a VxLAN packet,
5196 * as tunnel offloads have been enabled only for VxLAN. This is done to
5197 * allow other tunneled traffic like GRE work fine while VxLAN
5198 * offloads are configured in Skyhawk-R.
5199 */
5200 switch (vlan_get_protocol(skb)) {
5201 case htons(ETH_P_IP):
5202 l4_hdr = ip_hdr(skb)->protocol;
5203 break;
5204 case htons(ETH_P_IPV6):
5205 l4_hdr = ipv6_hdr(skb)->nexthdr;
5206 break;
5207 default:
5208 return features;
5209 }
5211 if (l4_hdr != IPPROTO_UDP ||
5212 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5213 skb->inner_protocol != htons(ETH_P_TEB) ||
5214 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5215 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5216 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5218 return features;
5219 }
5220 #endif
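/* Illustrative note: the check in be_features_check() accepts a packet
 * as VxLAN only when it parses as
 *   [outer IP][UDP][VxLAN header][inner Ethernet (ETH_P_TEB)][inner L3]
 * i.e. the L4 protocol is UDP and the inner MAC header starts exactly
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr) past the transport
 * header. Any other encapsulated skb (e.g. GRE) still flows, but loses
 * checksum and GSO offloads for that packet only.
 */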
5222 static const struct net_device_ops be_netdev_ops = {
5223 .ndo_open = be_open,
5224 .ndo_stop = be_close,
5225 .ndo_start_xmit = be_xmit,
5226 .ndo_set_rx_mode = be_set_rx_mode,
5227 .ndo_set_mac_address = be_mac_addr_set,
5228 .ndo_change_mtu = be_change_mtu,
5229 .ndo_get_stats64 = be_get_stats64,
5230 .ndo_validate_addr = eth_validate_addr,
5231 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5232 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
5233 .ndo_set_vf_mac = be_set_vf_mac,
5234 .ndo_set_vf_vlan = be_set_vf_vlan,
5235 .ndo_set_vf_rate = be_set_vf_tx_rate,
5236 .ndo_get_vf_config = be_get_vf_config,
5237 .ndo_set_vf_link_state = be_set_vf_link_state,
5238 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
5239 #ifdef CONFIG_NET_POLL_CONTROLLER
5240 .ndo_poll_controller = be_netpoll,
5241 #endif
5242 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5243 .ndo_bridge_getlink = be_ndo_bridge_getlink,
5244 #ifdef CONFIG_NET_RX_BUSY_POLL
5245 .ndo_busy_poll = be_busy_poll,
5246 #endif
5247 #ifdef CONFIG_BE2NET_VXLAN
5248 .ndo_add_vxlan_port = be_add_vxlan_port,
5249 .ndo_del_vxlan_port = be_del_vxlan_port,
5250 .ndo_features_check = be_features_check,
5251 #endif
5252 };
5254 static void be_netdev_init(struct net_device *netdev)
5255 {
5256 struct be_adapter *adapter = netdev_priv(netdev);
5258 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5259 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5260 NETIF_F_HW_VLAN_CTAG_TX;
5261 if (be_multi_rxq(adapter))
5262 netdev->hw_features |= NETIF_F_RXHASH;
5264 netdev->features |= netdev->hw_features |
5265 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
5267 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5268 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5270 netdev->priv_flags |= IFF_UNICAST_FLT;
5272 netdev->flags |= IFF_MULTICAST;
5274 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
5276 netdev->netdev_ops = &be_netdev_ops;
5278 netdev->ethtool_ops = &be_ethtool_ops;
5279 }
5281 static void be_cleanup(struct be_adapter *adapter)
5282 {
5283 struct net_device *netdev = adapter->netdev;
5285 rtnl_lock();
5286 netif_device_detach(netdev);
5287 if (netif_running(netdev))
5288 be_close(netdev);
5289 rtnl_unlock();
5291 be_clear(adapter);
5292 }
5294 static int be_resume(struct be_adapter *adapter)
5295 {
5296 struct net_device *netdev = adapter->netdev;
5297 int status;
5299 status = be_setup(adapter);
5300 if (status)
5301 return status;
5303 if (netif_running(netdev)) {
5304 status = be_open(netdev);
5305 if (status)
5306 return status;
5307 }
5309 netif_device_attach(netdev);
5311 return 0;
5312 }
5314 static int be_err_recover(struct be_adapter *adapter)
5315 {
5316 struct device *dev = &adapter->pdev->dev;
5317 int status;
5319 status = be_resume(adapter);
5320 if (status)
5321 goto err;
5323 dev_info(dev, "Adapter recovery successful\n");
5324 return 0;
5325 err:
5326 if (be_physfn(adapter))
5327 dev_err(dev, "Adapter recovery failed\n");
5328 else
5329 dev_err(dev, "Re-trying adapter recovery\n");
5331 return status;
5332 }
5334 static void be_err_detection_task(struct work_struct *work)
5335 {
5336 struct be_adapter *adapter =
5337 container_of(work, struct be_adapter,
5338 be_err_detection_work.work);
5339 int status = 0;
5341 be_detect_error(adapter);
5343 if (be_check_error(adapter, BE_ERROR_HW)) {
5344 be_cleanup(adapter);
5346 /* As of now error recovery support is in Lancer only */
5347 if (lancer_chip(adapter))
5348 status = be_err_recover(adapter);
5349 }
5351 /* Always attempt recovery on VFs */
5352 if (!status || be_virtfn(adapter))
5353 be_schedule_err_detection(adapter);
5354 }
5356 static void be_log_sfp_info(struct be_adapter *adapter)
5357 {
5358 int status;
5360 status = be_cmd_query_sfp_info(adapter);
5361 if (!status) {
5362 dev_err(&adapter->pdev->dev,
5363 "Unqualified SFP+ detected on %c from %s part no: %s",
5364 adapter->port_name, adapter->phy.vendor_name,
5365 adapter->phy.vendor_pn);
5366 }
5367 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5368 }
5370 static void be_worker(struct work_struct *work)
5371 {
5372 struct be_adapter *adapter =
5373 container_of(work, struct be_adapter, work.work);
5374 struct be_rx_obj *rxo;
5375 int i;
5377 /* when interrupts are not yet enabled, just reap any pending
5378 * mcc completions
5379 */
5380 if (!netif_running(adapter->netdev)) {
5381 local_bh_disable();
5382 be_process_mcc(adapter);
5383 local_bh_enable();
5384 goto reschedule;
5385 }
5387 if (!adapter->stats_cmd_sent) {
5388 if (lancer_chip(adapter))
5389 lancer_cmd_get_pport_stats(adapter,
5390 &adapter->stats_cmd);
5391 else
5392 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5393 }
5395 if (be_physfn(adapter) &&
5396 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5397 be_cmd_get_die_temperature(adapter);
5399 for_all_rx_queues(adapter, rxo, i) {
5400 /* Replenish RX-queues starved due to memory
5401 * allocation failures.
5402 */
5403 if (rxo->rx_post_starved)
5404 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5405 }
5407 /* EQ-delay update for Skyhawk is done while notifying EQ */
5408 if (!skyhawk_chip(adapter))
5409 be_eqd_update(adapter, false);
5411 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5412 be_log_sfp_info(adapter);
5414 reschedule:
5415 adapter->work_counter++;
5416 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5417 }
5419 static void be_unmap_pci_bars(struct be_adapter *adapter)
5420 {
5421 if (adapter->csr)
5422 pci_iounmap(adapter->pdev, adapter->csr);
5423 if (adapter->db)
5424 pci_iounmap(adapter->pdev, adapter->db);
5425 }
5427 static int db_bar(struct be_adapter *adapter)
5428 {
5429 if (lancer_chip(adapter) || be_virtfn(adapter))
5430 return 0;
5431 else
5432 return 4;
5433 }
5435 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5436 {
5437 if (skyhawk_chip(adapter)) {
5438 adapter->roce_db.size = 4096;
5439 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5440 db_bar(adapter));
5441 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5442 db_bar(adapter));
5443 }
5444 return 0;
5445 }
5447 static int be_map_pci_bars(struct be_adapter *adapter)
5448 {
5449 struct pci_dev *pdev = adapter->pdev;
5450 u8 __iomem *addr;
5451 u32 sli_intf;
5453 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5454 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5455 SLI_INTF_FAMILY_SHIFT;
5456 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5458 if (BEx_chip(adapter) && be_physfn(adapter)) {
5459 adapter->csr = pci_iomap(pdev, 2, 0);
5460 if (!adapter->csr)
5461 return -ENOMEM;
5462 }
5464 addr = pci_iomap(pdev, db_bar(adapter), 0);
5465 if (!addr)
5466 goto pci_map_err;
5467 adapter->db = addr;
5469 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5470 if (be_physfn(adapter)) {
5471 /* PCICFG is the 2nd BAR in BE2 */
5472 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5473 if (!addr)
5474 goto pci_map_err;
5475 adapter->pcicfg = addr;
5476 } else {
5477 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5478 }
5479 }
5481 be_roce_map_pci_bars(adapter);
5482 return 0;
5484 pci_map_err:
5485 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5486 be_unmap_pci_bars(adapter);
5487 return -ENOMEM;
5488 }
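/* Illustrative note, summarizing the BAR usage established above:
 *   CSR    - BAR 2, mapped only on BE2/BE3 physical functions
 *   DB     - db_bar(): BAR 0 on Lancer and on VFs, BAR 4 otherwise
 *   PCICFG - BAR 1 on BE2, BAR 0 on BE3/Skyhawk PFs; VFs reach it at
 *            SRIOV_VF_PCICFG_OFFSET inside the doorbell mapping
 */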
5490 static void be_drv_cleanup(struct be_adapter *adapter)
5491 {
5492 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5493 struct device *dev = &adapter->pdev->dev;
5495 if (mem->va)
5496 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5498 mem = &adapter->rx_filter;
5499 if (mem->va)
5500 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5502 mem = &adapter->stats_cmd;
5503 if (mem->va)
5504 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5505 }
5507 /* Allocate and initialize various fields in be_adapter struct */
5508 static int be_drv_init(struct be_adapter *adapter)
5509 {
5510 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5511 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5512 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5513 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5514 struct device *dev = &adapter->pdev->dev;
5515 int status = 0;
5517 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5518 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5519 &mbox_mem_alloc->dma,
5520 GFP_KERNEL);
5521 if (!mbox_mem_alloc->va)
5522 return -ENOMEM;
5524 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5525 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5526 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5528 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5529 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5530 &rx_filter->dma, GFP_KERNEL);
5531 if (!rx_filter->va) {
5532 status = -ENOMEM;
5533 goto free_mbox;
5534 }
5536 if (lancer_chip(adapter))
5537 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5538 else if (BE2_chip(adapter))
5539 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5540 else if (BE3_chip(adapter))
5541 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5542 else
5543 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5544 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5545 &stats_cmd->dma, GFP_KERNEL);
5546 if (!stats_cmd->va) {
5547 status = -ENOMEM;
5548 goto free_rx_filter;
5549 }
5551 mutex_init(&adapter->mbox_lock);
5552 spin_lock_init(&adapter->mcc_lock);
5553 spin_lock_init(&adapter->mcc_cq_lock);
5554 init_completion(&adapter->et_cmd_compl);
5556 pci_save_state(adapter->pdev);
5558 INIT_DELAYED_WORK(&adapter->work, be_worker);
5559 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5560 be_err_detection_task);
5562 adapter->rx_fc = true;
5563 adapter->tx_fc = true;
5565 /* Must be a power of 2 or else MODULO will BUG_ON */
5566 adapter->be_get_temp_freq = 64;
5568 return 0;
5570 free_rx_filter:
5571 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5572 free_mbox:
5573 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5574 mbox_mem_alloc->dma);
5575 return status;
5576 }
5578 static void be_remove(struct pci_dev *pdev)
5579 {
5580 struct be_adapter *adapter = pci_get_drvdata(pdev);
5582 if (!adapter)
5583 return;
5585 be_roce_dev_remove(adapter);
5586 be_intr_set(adapter, false);
5588 be_cancel_err_detection(adapter);
5590 unregister_netdev(adapter->netdev);
5592 be_clear(adapter);
5594 /* tell fw we're done with firing cmds */
5595 be_cmd_fw_clean(adapter);
5597 be_unmap_pci_bars(adapter);
5598 be_drv_cleanup(adapter);
5600 pci_disable_pcie_error_reporting(pdev);
5602 pci_release_regions(pdev);
5603 pci_disable_device(pdev);
5605 free_netdev(adapter->netdev);
5606 }
5608 static ssize_t be_hwmon_show_temp(struct device *dev,
5609 struct device_attribute *dev_attr,
5610 char *buf)
5611 {
5612 struct be_adapter *adapter = dev_get_drvdata(dev);
5614 /* Unit: millidegree Celsius */
5615 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5616 return -EIO;
5617 else
5618 return sprintf(buf, "%u\n",
5619 adapter->hwmon_info.be_on_die_temp * 1000);
5620 }
5622 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5623 be_hwmon_show_temp, NULL, 1);
5625 static struct attribute *be_hwmon_attrs[] = {
5626 &sensor_dev_attr_temp1_input.dev_attr.attr,
5627 NULL
5628 };
5630 ATTRIBUTE_GROUPS(be_hwmon);
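/* Illustrative note: the attribute registered above surfaces the die
 * temperature through the standard hwmon sysfs tree, e.g.:
 *   cat /sys/class/hwmon/hwmon0/temp1_input
 * prints the value in millidegrees Celsius ("hwmon0" is a placeholder
 * index), and tools such as "sensors" pick it up automatically.
 */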
5632 static char *mc_name(struct be_adapter *adapter)
5633 {
5634 char *str = ""; /* default */
5636 switch (adapter->mc_type) {
5637 case UMC:
5638 str = "UMC";
5639 break;
5640 case FLEX10:
5641 str = "FLEX10";
5642 break;
5643 case vNIC1:
5644 str = "vNIC1";
5645 break;
5646 case nPAR:
5647 str = "nPAR";
5648 break;
5649 case UFP:
5650 str = "UFP";
5651 break;
5652 case vNIC2:
5653 str = "vNIC2";
5654 break;
5655 default:
5656 str = "";
5657 }
5659 return str;
5660 }
5662 static inline char *func_name(struct be_adapter *adapter)
5663 {
5664 return be_physfn(adapter) ? "PF" : "VF";
5665 }
5667 static inline char *nic_name(struct pci_dev *pdev)
5668 {
5669 switch (pdev->device) {
5670 case OC_DEVICE_ID1:
5671 return OC_NAME;
5672 case OC_DEVICE_ID2:
5673 return OC_NAME_BE;
5674 case OC_DEVICE_ID3:
5675 case OC_DEVICE_ID4:
5676 return OC_NAME_LANCER;
5677 case BE_DEVICE_ID2:
5678 return BE3_NAME;
5679 case OC_DEVICE_ID5:
5680 case OC_DEVICE_ID6:
5681 return OC_NAME_SH;
5682 default:
5683 return BE_NAME;
5684 }
5685 }
5687 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5688 {
5689 struct be_adapter *adapter;
5690 struct net_device *netdev;
5691 int status = 0;
5693 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5695 status = pci_enable_device(pdev);
5696 if (status)
5697 goto do_none;
5699 status = pci_request_regions(pdev, DRV_NAME);
5700 if (status)
5701 goto disable_dev;
5702 pci_set_master(pdev);
5704 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5705 if (!netdev) {
5706 status = -ENOMEM;
5707 goto rel_reg;
5708 }
5709 adapter = netdev_priv(netdev);
5710 adapter->pdev = pdev;
5711 pci_set_drvdata(pdev, adapter);
5712 adapter->netdev = netdev;
5713 SET_NETDEV_DEV(netdev, &pdev->dev);
5715 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5716 if (!status) {
5717 netdev->features |= NETIF_F_HIGHDMA;
5718 } else {
5719 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5720 if (status) {
5721 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5722 goto free_netdev;
5723 }
5724 }
5726 status = pci_enable_pcie_error_reporting(pdev);
5727 if (!status)
5728 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
5730 status = be_map_pci_bars(adapter);
5731 if (status)
5732 goto free_netdev;
5734 status = be_drv_init(adapter);
5735 if (status)
5736 goto unmap_bars;
5738 status = be_setup(adapter);
5739 if (status)
5740 goto drv_cleanup;
5742 be_netdev_init(netdev);
5743 status = register_netdev(netdev);
5744 if (status != 0)
5745 goto unsetup;
5747 be_roce_dev_add(adapter);
5749 be_schedule_err_detection(adapter);
5751 /* On Die temperature not supported for VF. */
5752 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5753 adapter->hwmon_info.hwmon_dev =
5754 devm_hwmon_device_register_with_groups(&pdev->dev,
5755 DRV_NAME, adapter,
5756 be_hwmon_groups);
5758 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5759 }
5761 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5762 func_name(adapter), mc_name(adapter), adapter->port_name);
5764 return 0;
5766 unsetup:
5767 be_clear(adapter);
5768 drv_cleanup:
5769 be_drv_cleanup(adapter);
5770 unmap_bars:
5771 be_unmap_pci_bars(adapter);
5772 free_netdev:
5773 free_netdev(netdev);
5774 rel_reg:
5775 pci_release_regions(pdev);
5776 disable_dev:
5777 pci_disable_device(pdev);
5778 do_none:
5779 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5780 return status;
5781 }
5783 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5784 {
5785 struct be_adapter *adapter = pci_get_drvdata(pdev);
5787 if (adapter->wol_en)
5788 be_setup_wol(adapter, true);
5790 be_intr_set(adapter, false);
5791 be_cancel_err_detection(adapter);
5793 be_cleanup(adapter);
5795 pci_save_state(pdev);
5796 pci_disable_device(pdev);
5797 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5798 return 0;
5799 }
5801 static int be_pci_resume(struct pci_dev *pdev)
5802 {
5803 struct be_adapter *adapter = pci_get_drvdata(pdev);
5804 int status = 0;
5806 status = pci_enable_device(pdev);
5807 if (status)
5808 return status;
5810 pci_restore_state(pdev);
5812 status = be_resume(adapter);
5813 if (status)
5814 return status;
5816 be_schedule_err_detection(adapter);
5818 if (adapter->wol_en)
5819 be_setup_wol(adapter, false);
5821 return 0;
5822 }
5824 /*
5825 * An FLR will stop BE from DMAing any data.
5826 */
5827 static void be_shutdown(struct pci_dev *pdev)
5828 {
5829 struct be_adapter *adapter = pci_get_drvdata(pdev);
5831 if (!adapter)
5832 return;
5834 be_roce_dev_shutdown(adapter);
5835 cancel_delayed_work_sync(&adapter->work);
5836 be_cancel_err_detection(adapter);
5838 netif_device_detach(adapter->netdev);
5840 be_cmd_reset_function(adapter);
5842 pci_disable_device(pdev);
5843 }
5845 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5846 pci_channel_state_t state)
5847 {
5848 struct be_adapter *adapter = pci_get_drvdata(pdev);
5850 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5852 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5853 be_set_error(adapter, BE_ERROR_EEH);
5855 be_cancel_err_detection(adapter);
5857 be_cleanup(adapter);
5858 }
5860 if (state == pci_channel_io_perm_failure)
5861 return PCI_ERS_RESULT_DISCONNECT;
5863 pci_disable_device(pdev);
5865 /* The error could cause the FW to trigger a flash debug dump.
5866 * Resetting the card while flash dump is in progress
5867 * can cause it not to recover; wait for it to finish.
5868 * Wait only for first function as it is needed only once per
5869 * adapter.
5870 */
5871 if (pdev->devfn == 0)
5872 ssleep(30);
5874 return PCI_ERS_RESULT_NEED_RESET;
5875 }
5877 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5878 {
5879 struct be_adapter *adapter = pci_get_drvdata(pdev);
5880 int status;
5882 dev_info(&adapter->pdev->dev, "EEH reset\n");
5884 status = pci_enable_device(pdev);
5885 if (status)
5886 return PCI_ERS_RESULT_DISCONNECT;
5888 pci_set_master(pdev);
5889 pci_restore_state(pdev);
5891 /* Check if card is ok and fw is ready */
5892 dev_info(&adapter->pdev->dev,
5893 "Waiting for FW to be ready after EEH reset\n");
5894 status = be_fw_wait_ready(adapter);
5895 if (status)
5896 return PCI_ERS_RESULT_DISCONNECT;
5898 pci_cleanup_aer_uncorrect_error_status(pdev);
5899 be_clear_error(adapter, BE_CLEAR_ALL);
5900 return PCI_ERS_RESULT_RECOVERED;
5901 }
5903 static void be_eeh_resume(struct pci_dev *pdev)
5904 {
5905 int status = 0;
5906 struct be_adapter *adapter = pci_get_drvdata(pdev);
5908 dev_info(&adapter->pdev->dev, "EEH resume\n");
5910 pci_save_state(pdev);
5912 status = be_resume(adapter);
5913 if (status)
5914 goto err;
5916 be_schedule_err_detection(adapter);
5917 return;
5918 err:
5919 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5920 }
5922 static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5923 {
5924 struct be_adapter *adapter = pci_get_drvdata(pdev);
5925 u16 num_vf_qs;
5926 int status;
5928 if (!num_vfs)
5929 be_vf_clear(adapter);
5931 adapter->num_vfs = num_vfs;
5933 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5934 dev_warn(&pdev->dev,
5935 "Cannot disable VFs while they are assigned\n");
5936 return -EBUSY;
5937 }
5939 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5940 * are equally distributed across the max-number of VFs. The user may
5941 * request only a subset of the max-vfs to be enabled.
5942 * Based on num_vfs, redistribute the resources across num_vfs so that
5943 * each VF has access to a larger share of resources.
5944 * This facility is not available in BE3 FW.
5945 * Also, this is done by FW in Lancer chip.
5946 */
5947 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5948 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5949 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5950 adapter->num_vfs, num_vf_qs);
5951 if (status)
5952 dev_info(&pdev->dev,
5953 "Failed to optimize SR-IOV resources\n");
5954 }
5956 status = be_get_resources(adapter);
5957 if (status)
5958 return be_cmd_status(status);
5960 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5961 rtnl_lock();
5962 status = be_update_queues(adapter);
5963 rtnl_unlock();
5964 if (status)
5965 return be_cmd_status(status);
5967 if (adapter->num_vfs)
5968 status = be_vf_setup(adapter);
5970 if (!status)
5971 return adapter->num_vfs;
5973 return 0;
5974 }
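/* Illustrative note: this callback implements the standard PCI sysfs
 * interface for enabling and disabling VFs, e.g.:
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 * The device address is a placeholder. Disabling is refused while any
 * VF is still assigned to a guest, as checked above.
 */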
5976 static const struct pci_error_handlers be_eeh_handlers = {
5977 .error_detected = be_eeh_err_detected,
5978 .slot_reset = be_eeh_reset,
5979 .resume = be_eeh_resume,
5980 };
5982 static struct pci_driver be_driver = {
5983 .name = DRV_NAME,
5984 .id_table = be_dev_ids,
5985 .probe = be_probe,
5986 .remove = be_remove,
5987 .suspend = be_suspend,
5988 .resume = be_pci_resume,
5989 .shutdown = be_shutdown,
5990 .sriov_configure = be_pci_sriov_configure,
5991 .err_handler = &be_eeh_handlers
5992 };
5994 static int __init be_init_module(void)
5995 {
5996 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5997 rx_frag_size != 2048) {
5998 printk(KERN_WARNING DRV_NAME
5999 " : Module param rx_frag_size must be 2048/4096/8192."
6001 rx_frag_size = 2048;
6005 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6006 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6007 }
6009 return pci_register_driver(&be_driver);
6010 }
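/* Illustrative note: rx_frag_size may be set at module load time, e.g.
 * "modprobe be2net rx_frag_size=4096"; any value other than
 * 2048/4096/8192 falls back to 2048 with the warning above.
 */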
6011 module_init(be_init_module);
6013 static void __exit be_exit_module(void)
6014 {
6015 pci_unregister_driver(&be_driver);
6016 }
6017 module_exit(be_exit_module);