 * Copyright (C) 2005 - 2014 Emulex
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
#include <linux/prefetch.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
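/* Illustrative usage only (the module name is assumed to be be2net, and the
 * values are examples, not recommendations):
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */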
static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
        struct be_dma_mem *mem = &q->dma_mem;

        dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
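/* Sizing example for be_queue_alloc() below (illustrative values): a queue
 * created with len = 1024 entries of entry_size = 16 bytes results in one
 * 16KB DMA-coherent allocation for the whole ring.
 */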
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
static void be_intr_set(struct be_adapter *adapter, bool enable)
        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))

        if (adapter->eeh_error)

        status = be_cmd_intr_set(adapter, enable);
                be_reg_intr_set(adapter, enable);
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        iowrite32(val, adapter->db + DB_RQ_OFFSET);

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        iowrite32(val, adapter->db + txo->db_offset);
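/* be_eq_notify() below composes a single 32-bit EQ doorbell write.
 * Illustrative example (field positions come from the DB_EQ_* definitions in
 * be_hw.h): re-arming EQ id 5 after popping 3 events ORs together the qid
 * bits, the REARM and EVNT bits, and (3 << DB_EQ_NUM_POPPED_SHIFT).
 */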
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)

                val |= 1 << DB_EQ_REARM_SHIFT;
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)

                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
static int be_mac_addr_set(struct net_device *netdev, void *p)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. That failure is OK only if the PF programmed
         * the MAC for the VF.
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
                curr_pmac_id = adapter->pmac_id[0];

        /* Delete the old programmed MAC. This call may fail if the
         * old MAC was already deleted by the PF driver.
        if (adapter->pmac_id[0] != old_pmac_id)
                be_cmd_pmac_del(adapter, adapter->if_handle,

        /* Decide if the new MAC is successfully activated only after
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
        if (!ether_addr_equal(addr->sa_data, mac)) {

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);

        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
static void populate_be_v0_stats(struct be_adapter *adapter)
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                port_stats->rx_address_filtered +
                port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
static void populate_be_v1_stats(struct be_adapter *adapter)
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
static void populate_be_v2_stats(struct be_adapter *adapter)
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
static void populate_lancer_stats(struct be_adapter *adapter)
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                pport_stats->rx_address_filtered +
                pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                pport_stats->rx_drops_too_many_frags_lo;
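/* accumulate_16bit_val() below folds a 16-bit HW counter into a 32-bit SW
 * accumulator, detecting wrap-around.  Worked example (illustrative values):
 * *acc = 0x0003FFF0 and the HW now reports val = 0x0005.  Since
 * 0x0005 < lo(*acc), the counter must have wrapped, so the wrap adjustment
 * (elided here) adds 0x10000 and the new accumulator value becomes
 * 0x00030000 + 0x0005 + 0x10000 = 0x00040005.
 */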
static void accumulate_16bit_val(u32 *acc, u16 val)
#define lo(x)			((x) & 0xFFFF)
#define hi(x)			((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        ACCESS_ONCE(*acc) = newacc;
static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;

                /* The erx HW counter below can wrap around after 65535.
                 * The driver accumulates it into a 32-bit value.
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
void be_parse_stats(struct be_adapter *adapter)
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0 and v1. Use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                        start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                     rx_stats(rxo)->rx_drops_no_frags;

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                        start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;

                netif_carrier_on(netdev);
                netif_carrier_off(netdev);

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_bytes += skb->len;
        stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
        u64_stats_update_end(&stats->sync);
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
        /* +1 for the header wrb */
        return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
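/* Example: an skb with linear data (skb_headlen() > 0) and two paged frags
 * needs 1 (header WRB) + 1 (linear buffer) + 2 (frags) = 4 WRBs.
 */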
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
        wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
        wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
        wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);

/* A dummy wrb is all zeros. A separate routine is used for the dummy wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
        vlan_tag = skb_vlan_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                           adapter->recommended_prio;

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
        return (inner_ip_hdr(skb)->version == 4) ?
                inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;

static u16 skb_ip_proto(struct sk_buff *skb)
        return (ip_hdr(skb)->version == 4) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
        memset(hdr, 0, sizeof(*hdr));

        SET_TX_WRB_HDR_BITS(crc, hdr, 1);

        if (skb_is_gso(skb)) {
                SET_TX_WRB_HDR_BITS(lso, hdr, 1);
                SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
                        proto = skb_inner_ip_proto(skb);
                        proto = skb_ip_proto(skb);
                if (proto == IPPROTO_TCP)
                        SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
                else if (proto == IPPROTO_UDP)
                        SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);

        if (skb_vlan_tag_present(skb)) {
                SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);

        SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
        SET_TX_WRB_HDR_BITS(len, hdr, len);

        /* The hack to skip HW VLAN tagging needs evt = 1, compl = 0.
         * When this hack is not needed, the evt bit is set while ringing the DB.
                SET_TX_WRB_HDR_BITS(event, hdr, 1);
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
        u32 frag_len = le32_to_cpu(wrb->frag_len);

                dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
                        (u64)le32_to_cpu(wrb->frag_pa_lo);

                        dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
                        dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);

/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
                           struct sk_buff *skb, bool skip_hw_vlan)
        u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
        struct device *dev = &adapter->pdev->dev;
        struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        struct be_eth_wrb *wrb;
        u16 head = txq->head;

        hdr = queue_head_node(txq);
        wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                copied += skb_frag_size(frag);

        BUG_ON(txo->sent_skb_list[head]);
        txo->sent_skb_list[head] = skb;
        txo->last_req_hdr = head;
        atomic_add(wrb_cnt, &txq->used);
        txo->last_req_wrb_cnt = wrb_cnt;
        txo->pend_wrb_cnt += wrb_cnt;

        be_tx_stats_update(txo, skb);

        /* Bring the queue back to the state it was in before this
         * routine was invoked.
        /* skip the first wrb (hdr); it's not mapped */
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                copied -= le32_to_cpu(wrb->frag_len);
        adapter->drv_stats.dma_map_errors++;
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
        return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
        skb = skb_share_check(skb, GFP_ATOMIC);

        if (skb_vlan_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                        vlan_tag = adapter->pvid;
                        /* F/W workaround: setting skip_hw_vlan = 1 informs the
                         * F/W to skip VLAN insertion
                        *skip_hw_vlan = true;

                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
                        *skip_hw_vlan = true;
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
        return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate the CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in the pkt.
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            skb_vlan_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts a VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in the driver,
         * and set the event, completion, vlan bits accordingly
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);

        dev_kfree_skb_any(skb);
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
        /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. The work-around is
         * to pad such short packets (<= 32 bytes) to a 36-byte length.
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_put_padto(skb, 36))

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
        struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

        /* Mark the last request eventable if it hasn't been marked already */
        if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
                hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

        /* compose a dummy wrb if there is an odd number of wrbs to notify */
        if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
                wrb_fill_dummy(queue_head_node(txq));
                queue_head_inc(txq);
                atomic_inc(&txq->used);
                txo->pend_wrb_cnt++;
                hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
                                           TX_HDR_WRB_NUM_SHIFT);
                hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
                                          TX_HDR_WRB_NUM_SHIFT);
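                /* Worked example (illustrative): with 3 pending wrbs, the
                 * dummy above makes the doorbell count even (4), and the
                 * header's num_wrb field is bumped from last_req_wrb_cnt to
                 * last_req_wrb_cnt + 1 so completion accounting still matches.
                 */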
        be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
        txo->pend_wrb_cnt = 0;

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
        bool skip_hw_vlan = false, flush = !skb->xmit_more;
        struct be_adapter *adapter = netdev_priv(netdev);
        u16 q_idx = skb_get_queue_mapping(skb);
        struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
        struct be_queue_info *txq = &txo->q;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);

        wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
        if (unlikely(!wrb_cnt)) {
                dev_kfree_skb_any(skb);

        if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
                netif_stop_subqueue(netdev, q_idx);
                tx_stats(txo)->tx_stops++;

        if (flush || __netif_subqueue_stopped(netdev, q_idx))
                be_xmit_flush(adapter, txo);

        return NETDEV_TX_OK;

        tx_stats(txo)->tx_drv_drops++;
        /* Flush the already enqueued tx requests */
        if (flush && txo->pend_wrb_cnt)
                be_xmit_flush(adapter, txo);

        return NETDEV_TX_OK;
static int be_change_mtu(struct net_device *netdev, int new_mtu)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;

        if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
                dev_info(dev, "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU, BE_MAX_MTU);

        dev_info(dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;

static inline bool be_in_all_promisc(struct be_adapter *adapter)
        return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
                        BE_IF_FLAGS_ALL_PROMISCUOUS;
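/* Note: the equality test above (rather than a simple nonzero check) matters
 * because BE_IF_FLAGS_ALL_PROMISCUOUS is a mask of several promiscuous-mode
 * bits; the interface counts as "all promiscuous" only when every bit in the
 * mask is set.
 */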
static int be_set_vlan_promisc(struct be_adapter *adapter)
        struct device *dev = &adapter->pdev->dev;

        if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
                dev_info(dev, "Enabled VLAN promiscuous mode\n");
                adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
                dev_err(dev, "Failed to enable VLAN promiscuous mode\n");

static int be_clear_vlan_promisc(struct be_adapter *adapter)
        struct device *dev = &adapter->pdev->dev;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
                dev_info(dev, "Disabled VLAN promiscuous mode\n");
                adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;

 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
static int be_vid_config(struct be_adapter *adapter)
        struct device *dev = &adapter->pdev->dev;
        u16 vids[BE_NUM_VLANS_SUPPORTED];

        /* No need to further configure vids if in promiscuous mode */
        if (be_in_all_promisc(adapter))

        if (adapter->vlans_added > be_max_vlans(adapter))
                return be_set_vlan_promisc(adapter);

        /* Construct VLAN Table to give to HW */
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
                vids[num++] = cpu_to_le16(i);
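        /* Example: with VLANs 10 and 20 configured, vids[] ends up as
         * { cpu_to_le16(10), cpu_to_le16(20) } and num = 2.
         */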
        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
                dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (addl_status(status) ==
                    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        return be_set_vlan_promisc(adapter);
        } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
                status = be_clear_vlan_promisc(adapter);
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)

        if (test_bit(vid, adapter->vids))

        set_bit(vid, adapter->vids);
        adapter->vlans_added++;

        status = be_vid_config(adapter);
                adapter->vlans_added--;
                clear_bit(vid, adapter->vids);

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)

        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;

        return be_vid_config(adapter);

static void be_clear_all_promisc(struct be_adapter *adapter)
        be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
        adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

static void be_set_all_promisc(struct be_adapter *adapter)
        be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
        adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;

static void be_set_mc_promisc(struct be_adapter *adapter)
        if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
                adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;

static void be_set_mc_list(struct be_adapter *adapter)
        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
                adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
                be_set_mc_promisc(adapter);

static void be_set_uc_list(struct be_adapter *adapter)
        struct netdev_hw_addr *ha;
        int i = 1; /* First slot is claimed by the Primary MAC */

        for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id[i], 0);

        if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
                be_set_all_promisc(adapter);

        netdev_for_each_uc_addr(ha, adapter->netdev) {
                adapter->uc_macs++; /* First slot is for Primary MAC */
                be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
                                &adapter->pmac_id[adapter->uc_macs], 0);

static void be_clear_uc_list(struct be_adapter *adapter)
        for (i = 1; i < (adapter->uc_macs + 1); i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id[i], 0);
        adapter->uc_macs = 0;
static void be_set_rx_mode(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_set_all_promisc(adapter);

        /* Interface was previously in promiscuous mode; disable it */
        if (be_in_all_promisc(adapter)) {
                be_clear_all_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_set_mc_promisc(adapter);

        if (netdev_uc_count(netdev) != adapter->uc_macs)
                be_set_uc_list(adapter);

        be_set_mc_list(adapter);
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)

        /* Proceed further only if the user-provided MAC is different
        if (ether_addr_equal(mac, vf_cfg->mac_addr))

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,

                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed: %#x",
                return be_cmd_status(status);

        ether_addr_copy(vf_cfg->mac_addr, mac);

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))

        if (vf >= adapter->num_vfs)

        vi->max_tx_rate = vf_cfg->tx_rate;
        vi->min_tx_rate = 0;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)

                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                /* Reset Transparent VLAN Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);

                dev_err(&adapter->pdev->dev,
                        "VLAN %d config on VF %d failed : %#x\n", vlan,
                return be_cmd_status(status);

        vf_cfg->vlan_tag = vlan;

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
                             int min_tx_rate, int max_tx_rate)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        int percent_rate, status = 0;

        if (!sriov_enabled(adapter))

        if (vf >= adapter->num_vfs)

        status = be_cmd_link_status_query(adapter, &link_speed,

                dev_err(dev, "TX-rate setting not allowed when link is down\n");

        if (max_tx_rate < 100 || max_tx_rate > link_speed) {
                dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",

        /* On Skyhawk the QOS setting must be done only as a % value */
        percent_rate = link_speed / 100;
        if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
                dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",

        status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);

        adapter->vf_cfg[vf].tx_rate = max_tx_rate;

        dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
        return be_cmd_status(status);

static int be_set_vf_link_state(struct net_device *netdev, int vf,
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!sriov_enabled(adapter))

        if (vf >= adapter->num_vfs)

        status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
                dev_err(&adapter->pdev->dev,
                        "Link state change on VF %d failed: %#x\n", vf, status);
                return be_cmd_status(status);

        adapter->vf_cfg[vf].plink_tracking = link_state;
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;

static void be_eqd_update(struct be_adapter *adapter)
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        int eqd, i, num = 0, start;
        struct be_aic_obj *aic;
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 rx_pkts, tx_pkts;

        for_all_evt_queues(adapter, eqo, i) {
                aic = &adapter->aic_obj[eqo->idx];

                rxo = &adapter->rx_obj[eqo->idx];
                        start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
                        rx_pkts = rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

                txo = &adapter->tx_obj[eqo->idx];
                        start = u64_stats_fetch_begin_irq(&txo->stats.sync);
                        tx_pkts = txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

                /* Skip if wrapped around, or on the first calculation */
                if (!aic->jiffies || time_before(now, aic->jiffies) ||
                    rx_pkts < aic->rx_pkts_prev ||
                    tx_pkts < aic->tx_reqs_prev) {
                        be_aic_update(aic, rx_pkts, tx_pkts, now);
                delta = jiffies_to_msecs(now - aic->jiffies);
                pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
                        (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
                eqd = (pps / 15000) << 2;

                eqd = min_t(u32, eqd, aic->max_eqd);
                eqd = max_t(u32, eqd, aic->min_eqd);
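                /* Worked example (illustrative): at 60000 pkts/sec,
                 * eqd = (60000 / 15000) << 2 = 16, which the two clamps above
                 * then bound to the [aic->min_eqd, aic->max_eqd] range.
                 */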
                be_aic_update(aic, rx_pkts, tx_pkts, now);

                if (eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65) / 100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;

        be_cmd_modify_eqd(adapter, set_eqd, num);
static void be_rx_stats_update(struct be_rx_obj *rxo,
                               struct be_rx_compl_info *rxcp)
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_bytes += rxcp->pkt_size;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
        /* The L4 checksum is not reliable for non-TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
        u16 frag_idx = rxq->tail;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_frag) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_frag = false;
                dma_sync_single_for_cpu(&adapter->pdev->dev,
                                        dma_unmap_addr(rx_page_info, bus),
                                        rx_frag_size, DMA_FROM_DEVICE);

        queue_tail_inc(rxq);
        atomic_dec(&rxq->used);
        return rx_page_info;
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
        struct be_rx_page_info *page_info;
        u16 hdr_len, curr_frag_len, remaining;

        page_info = get_rx_page_info(rxo);
        start = page_address(page_info->page) + page_info->page_offset;

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->tail += curr_frag_len;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);

        /* More frags present for this completion */
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {

                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                        put_page(page_info->page);

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                page_info->page = NULL;
        BUG_ON(j > MAX_SKB_FRAGS);
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                                struct be_rx_compl_info *rxcp)
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

        skb->csum_level = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);

                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
                                    struct napi_struct *napi,
                                    struct be_rx_compl_info *rxcp)
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        u16 remaining, curr_frag_len;

        skb = napi_get_frags(napi);
                be_rx_compl_discard(rxo, rxcp);

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        put_page(page_info->page);
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                memset(page_info, 0, sizeof(*page_info));
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

        skb->csum_level = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);

                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        napi_gro_frags(napi);
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
        rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
        rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
        rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
        rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
        rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
        rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
        rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
        rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
        rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
        rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
        rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
                rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
                rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
        rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
                GET_RX_COMPL_V1_BITS(tunneled, compl);

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
        rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
        rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
        rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
        rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
        rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
        rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
        rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
        rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
        rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
        rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
        rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
                rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
                rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
        rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
        rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition, as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)

        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(compl, rxcp);
                be_parse_rx_compl_v0(compl, rxcp);

                /* In QNQ modes, if the qnq bit is not set, then the packet was
                 * tagged only with the transparent outer vlan-tag and must
                 * not be treated as a vlan packet by the host
                if (be_is_qnq_mode(adapter) && !rxcp->qnq)

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !test_bit(rxcp->vlan_tag, adapter->vids))

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
        u32 order = get_order(size);

        return alloc_pages(gfp, order);
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * the fragments as receive buffers to BE
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct device *dev = &adapter->pdev->dev;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0, notify = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                        page_dmaaddr = dma_map_page(dev, pagep, 0,
                                                    adapter->big_page_size,
                        if (dma_mapping_error(dev, page_dmaaddr)) {
                                adapter->drv_stats.dma_map_errors++;

                        page_offset += rx_frag_size;
                page_info->page_offset = page_offset;
                page_info->page = pagep;

                rxd = queue_head_node(rxq);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        page_info->last_frag = true;
                        dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                        dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
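                /* Example (illustrative sizes): with rx_frag_size = 2048 and a
                 * big_page_size of 16K, eight frags are carved out of each
                 * compound page.  Only the last frag of a page records the
                 * full page DMA address (for the unmap in get_rx_page_info());
                 * the others record their own frag address for the dma_sync
                 * done on completion.
                 */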
                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &rxo->page_info_tbl[rxq->head];

        /* Mark the last frag of a page when we break out of the above loop
         * with no more slots available in the RXQ
                prev_page_info->last_frag = true;
                dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);

                atomic_add(posted, &rxq->used);
                if (rxo->rx_post_starved)
                        rxo->rx_post_starved = false;

                notify = min(256u, posted);
                be_rxq_notify(adapter, rxq->id, notify);

        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)

        be_dws_le_to_cpu(txcp, sizeof(*txcp));
        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
static u16 be_tx_compl_process(struct be_adapter *adapter,
                               struct be_tx_obj *txo, u16 last_index)
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct be_queue_info *txq = &txo->q;
        u16 frag_index, num_wrbs = 0;
        struct sk_buff *skb = NULL;
        bool unmap_skb_hdr = false;
        struct be_eth_wrb *wrb;

                if (sent_skbs[txq->tail]) {
                        /* Free skb from prev req */
                                dev_consume_skb_any(skb);
                        skb = sent_skbs[txq->tail];
                        sent_skbs[txq->tail] = NULL;
                        queue_tail_inc(txq); /* skip hdr wrb */
                        unmap_skb_hdr = true;
                wrb = queue_tail_node(txq);
                frag_index = txq->tail;
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(skb)));
                unmap_skb_hdr = false;
                queue_tail_inc(txq);
        } while (frag_index != last_index);
        dev_consume_skb_any(skb);
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
        struct be_eq_entry *eqe;

                eqe = queue_tail_node(&eqo->q);

                queue_tail_inc(&eqo->q);

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
        int num = events_get(eqo);

        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
static void be_rx_cq_clean(struct be_rx_obj *rxo)
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* Consume pending rx completions.
         * Wait for the flush completion (identified by zero num_rcvd)
         * to arrive. Notify CQ even when there are no more CQ entries
         * for HW to flush partially coalesced CQ entries.
         * In Lancer, there is no need to wait for flush compl.
                rxcp = be_rx_compl_get(rxo);
                        if (lancer_chip(adapter))

                        if (flush_wait++ > 10 || be_hw_error(adapter)) {
                                dev_warn(&adapter->pdev->dev,
                                         "did not receive flush compl\n");
                        be_cq_notify(adapter, rx_cq->id, true, 0);
                        be_rx_compl_discard(rxo, rxcp);
                        be_cq_notify(adapter, rx_cq->id, false, 1);
                        if (rxcp->num_rcvd == 0)

        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);

        /* Then free posted rx buffers that were not used */
        while (atomic_read(&rxq->used) > 0) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        BUG_ON(atomic_read(&rxq->used));
2123 static void be_tx_compl_clean(struct be_adapter *adapter)
2125 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2126 struct device *dev = &adapter->pdev->dev;
2127 struct be_tx_obj *txo;
2128 struct be_queue_info *txq;
2129 struct be_eth_tx_compl *txcp;
2130 int i, pending_txqs;
2132 /* Stop polling for compls when HW has been silent for 10ms */
2134 pending_txqs = adapter->num_tx_qs;
2136 for_all_tx_queues(adapter, txo, i) {
2140 while ((txcp = be_tx_compl_get(&txo->cq))) {
2141 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2142 num_wrbs += be_tx_compl_process(adapter, txo,
2147 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2148 atomic_sub(num_wrbs, &txq->used);
2151 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
2155 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2161 /* Free enqueued TX that was never notified to HW */
2162 for_all_tx_queues(adapter, txo, i) {
2165 if (atomic_read(&txq->used)) {
2166 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2167 i, atomic_read(&txq->used));
2168 notified_idx = txq->tail;
2169 end_idx = txq->tail;
2170 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2172 /* Use the tx-compl process logic to handle requests
2173 * that were not sent to the HW.
2175 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2176 atomic_sub(num_wrbs, &txq->used);
2177 BUG_ON(atomic_read(&txq->used));
2178 txo->pend_wrb_cnt = 0;
2179 /* Since hw was never notified of these requests, reset TXQ indices */
2182 txq->head = notified_idx;
2183 txq->tail = notified_idx;
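/* Editor's note: index_adv() used above is not visible in this listing.
 * A minimal sketch of the presumed helper follows, assuming be2net ring
 * lengths are powers of two so an index can wrap with a mask instead of
 * a modulo. The name ex_index_adv and the exact types are illustrative,
 * not the driver's.
 */
#include <stdint.h>

static inline void ex_index_adv(uint16_t *index, uint16_t count, uint16_t q_len)
{
	/* q_len must be a power of 2 for the mask to be a valid wrap */
	*index = (*index + count) & (q_len - 1);
}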
2188 static void be_evt_queues_destroy(struct be_adapter *adapter)
2190 struct be_eq_obj *eqo;
2193 for_all_evt_queues(adapter, eqo, i) {
2194 if (eqo->q.created) {
2196 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2197 napi_hash_del(&eqo->napi);
2198 netif_napi_del(&eqo->napi);
2200 be_queue_free(adapter, &eqo->q);
2204 static int be_evt_queues_create(struct be_adapter *adapter)
2206 struct be_queue_info *eq;
2207 struct be_eq_obj *eqo;
2208 struct be_aic_obj *aic;
2211 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2212 adapter->cfg_num_qs);
2214 for_all_evt_queues(adapter, eqo, i) {
2215 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2217 napi_hash_add(&eqo->napi);
2218 aic = &adapter->aic_obj[i];
2219 eqo->adapter = adapter;
2221 aic->max_eqd = BE_MAX_EQD;
2225 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2226 sizeof(struct be_eq_entry));
2230 rc = be_cmd_eq_create(adapter, eqo);
2237 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2239 struct be_queue_info *q;
2241 q = &adapter->mcc_obj.q;
2243 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2244 be_queue_free(adapter, q);
2246 q = &adapter->mcc_obj.cq;
2248 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2249 be_queue_free(adapter, q);
2252 /* Must be called only after TX qs are created as MCC shares TX EQ */
2253 static int be_mcc_queues_create(struct be_adapter *adapter)
2255 struct be_queue_info *q, *cq;
2257 cq = &adapter->mcc_obj.cq;
2258 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2259 sizeof(struct be_mcc_compl)))
2262 /* Use the default EQ for MCC completions */
2263 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2266 q = &adapter->mcc_obj.q;
2267 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2268 goto mcc_cq_destroy;
2270 if (be_cmd_mccq_create(adapter, q, cq))
2276 be_queue_free(adapter, q);
2278 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2280 be_queue_free(adapter, cq);
2285 static void be_tx_queues_destroy(struct be_adapter *adapter)
2287 struct be_queue_info *q;
2288 struct be_tx_obj *txo;
2291 for_all_tx_queues(adapter, txo, i) {
2294 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2295 be_queue_free(adapter, q);
2299 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2300 be_queue_free(adapter, q);
2304 static int be_tx_qs_create(struct be_adapter *adapter)
2306 struct be_queue_info *cq, *eq;
2307 struct be_tx_obj *txo;
2310 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2312 for_all_tx_queues(adapter, txo, i) {
2314 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2315 sizeof(struct be_eth_tx_compl));
2319 u64_stats_init(&txo->stats.sync);
2320 u64_stats_init(&txo->stats.sync_compl);
2322 /* If num_evt_qs is less than num_tx_qs, then more than
2323 * one txq shares an eq
2325 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2326 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2330 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2331 sizeof(struct be_eth_wrb));
2335 status = be_cmd_txq_create(adapter, txo);
2340 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2341 adapter->num_tx_qs);
2345 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2347 struct be_queue_info *q;
2348 struct be_rx_obj *rxo;
2351 for_all_rx_queues(adapter, rxo, i) {
2354 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2355 be_queue_free(adapter, q);
2359 static int be_rx_cqs_create(struct be_adapter *adapter)
2361 struct be_queue_info *eq, *cq;
2362 struct be_rx_obj *rxo;
2365 /* We can create as many RSS rings as there are EQs. */
2366 adapter->num_rx_qs = adapter->num_evt_qs;
2368 /* We'll use RSS only if at least 2 RSS rings are supported.
2369 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2371 if (adapter->num_rx_qs > 1)
2372 adapter->num_rx_qs++;
2374 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2375 for_all_rx_queues(adapter, rxo, i) {
2376 rxo->adapter = adapter;
2378 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2379 sizeof(struct be_eth_rx_compl));
2383 u64_stats_init(&rxo->stats.sync);
2384 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2385 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2390 dev_info(&adapter->pdev->dev,
2391 "created %d RSS queue(s) and 1 default RX queue\n",
2392 adapter->num_rx_qs - 1);
2396 static irqreturn_t be_intx(int irq, void *dev)
2398 struct be_eq_obj *eqo = dev;
2399 struct be_adapter *adapter = eqo->adapter;
2402 /* IRQ is not expected when NAPI is scheduled as the EQ
2403 * will not be armed.
2404 * But, this can happen on Lancer INTx where it takes
2405 * a while to de-assert INTx or in BE2 where occasionally
2406 * an interrupt may be raised even when EQ is unarmed.
2407 * If NAPI is already scheduled, then counting & notifying
2408 * events will orphan them.
2410 if (napi_schedule_prep(&eqo->napi)) {
2411 num_evts = events_get(eqo);
2412 __napi_schedule(&eqo->napi);
2414 eqo->spurious_intr = 0;
2416 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2418 /* Return IRQ_HANDLED only for the first spurious intr
2419 * after a valid intr to stop the kernel from branding
2420 * this irq as a bad one!
2422 if (num_evts || eqo->spurious_intr++ == 0)
2428 static irqreturn_t be_msix(int irq, void *dev)
2430 struct be_eq_obj *eqo = dev;
2432 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2433 napi_schedule(&eqo->napi);
2437 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2439 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2442 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2443 int budget, int polling)
2445 struct be_adapter *adapter = rxo->adapter;
2446 struct be_queue_info *rx_cq = &rxo->cq;
2447 struct be_rx_compl_info *rxcp;
2449 u32 frags_consumed = 0;
2451 for (work_done = 0; work_done < budget; work_done++) {
2452 rxcp = be_rx_compl_get(rxo);
2456 /* Is it a flush compl that has no data */
2457 if (unlikely(rxcp->num_rcvd == 0))
2460 /* Discard compl with partial DMA (Lancer B0) */
2461 if (unlikely(!rxcp->pkt_size)) {
2462 be_rx_compl_discard(rxo, rxcp);
2466 /* On BE drop pkts that arrive due to imperfect filtering in
2467 * promiscuous mode on some SKUs
2469 if (unlikely(rxcp->port != adapter->port_num &&
2470 !lancer_chip(adapter))) {
2471 be_rx_compl_discard(rxo, rxcp);
2475 /* Don't do GRO when we're busy-polling */
2476 if (do_gro(rxcp) && polling != BUSY_POLLING)
2477 be_rx_compl_process_gro(rxo, napi, rxcp);
2479 be_rx_compl_process(rxo, napi, rxcp);
2482 frags_consumed += rxcp->num_rcvd;
2483 be_rx_stats_update(rxo, rxcp);
2487 be_cq_notify(adapter, rx_cq->id, true, work_done);
2489 /* When an rx-obj gets into post_starved state, just
2490 * let be_worker do the posting.
2492 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2493 !rxo->rx_post_starved)
2494 be_post_rx_frags(rxo, GFP_ATOMIC,
2495 max_t(u32, MAX_RX_POST,
2502 static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2505 case BE_TX_COMP_HDR_PARSE_ERR:
2506 tx_stats(txo)->tx_hdr_parse_err++;
2508 case BE_TX_COMP_NDMA_ERR:
2509 tx_stats(txo)->tx_dma_err++;
2511 case BE_TX_COMP_ACL_ERR:
2512 tx_stats(txo)->tx_spoof_check_err++;
2517 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2520 case LANCER_TX_COMP_LSO_ERR:
2521 tx_stats(txo)->tx_tso_err++;
2523 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2524 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2525 tx_stats(txo)->tx_spoof_check_err++;
2527 case LANCER_TX_COMP_QINQ_ERR:
2528 tx_stats(txo)->tx_qinq_err++;
2530 case LANCER_TX_COMP_PARITY_ERR:
2531 tx_stats(txo)->tx_internal_parity_err++;
2533 case LANCER_TX_COMP_DMA_ERR:
2534 tx_stats(txo)->tx_dma_err++;
2539 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2542 struct be_eth_tx_compl *txcp;
2543 int num_wrbs = 0, work_done = 0;
2547 while ((txcp = be_tx_compl_get(&txo->cq))) {
2548 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2549 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2552 compl_status = GET_TX_COMPL_BITS(status, txcp);
2554 if (lancer_chip(adapter))
2555 lancer_update_tx_err(txo, compl_status);
2557 be_update_tx_err(txo, compl_status);
2562 be_cq_notify(adapter, txo->cq.id, true, work_done);
2563 atomic_sub(num_wrbs, &txo->q.used);
2565 /* As Tx wrbs have been freed up, wake up netdev queue
2566 * if it was stopped due to lack of tx wrbs. */
2567 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2568 atomic_read(&txo->q.used) < txo->q.len / 2) {
2569 netif_wake_subqueue(adapter->netdev, idx);
2572 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2573 tx_stats(txo)->tx_compl += work_done;
2574 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2578 #ifdef CONFIG_NET_RX_BUSY_POLL
2579 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2583 spin_lock(&eqo->lock); /* BH is already disabled */
2584 if (eqo->state & BE_EQ_LOCKED) {
2585 WARN_ON(eqo->state & BE_EQ_NAPI);
2586 eqo->state |= BE_EQ_NAPI_YIELD;
2589 eqo->state = BE_EQ_NAPI;
2591 spin_unlock(&eqo->lock);
2595 static inline void be_unlock_napi(struct be_eq_obj *eqo)
2597 spin_lock(&eqo->lock); /* BH is already disabled */
2599 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2600 eqo->state = BE_EQ_IDLE;
2602 spin_unlock(&eqo->lock);
2605 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2609 spin_lock_bh(&eqo->lock);
2610 if (eqo->state & BE_EQ_LOCKED) {
2611 eqo->state |= BE_EQ_POLL_YIELD;
2614 eqo->state |= BE_EQ_POLL;
2616 spin_unlock_bh(&eqo->lock);
2620 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2622 spin_lock_bh(&eqo->lock);
2624 WARN_ON(eqo->state & (BE_EQ_NAPI));
2625 eqo->state = BE_EQ_IDLE;
2627 spin_unlock_bh(&eqo->lock);
2630 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2632 spin_lock_init(&eqo->lock);
2633 eqo->state = BE_EQ_IDLE;
2636 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2640 /* It's enough to just acquire the napi lock on the eqo to stop
2641 * be_busy_poll() from processing any queues.
2643 while (!be_lock_napi(eqo))
2649 #else /* CONFIG_NET_RX_BUSY_POLL */
2651 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2656 static inline void be_unlock_napi(struct be_eq_obj *eqo)
2660 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2665 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2669 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2673 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2676 #endif /* CONFIG_NET_RX_BUSY_POLL */
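/* Editor's note: a summary of the eqo->state handshake implemented
 * above. BE_EQ_IDLE means nobody owns the EQ; BE_EQ_NAPI or BE_EQ_POLL
 * marks the current owner; a *_YIELD bit records that the other path
 * tried to enter and backed off, so contention can be accounted for.
 * The lock/unlock pairs keep the napi poll path and the busy-poll path
 * from walking the same RX rings concurrently.
 */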
2678 int be_poll(struct napi_struct *napi, int budget)
2680 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2681 struct be_adapter *adapter = eqo->adapter;
2682 int max_work = 0, work, i, num_evts;
2683 struct be_rx_obj *rxo;
2684 struct be_tx_obj *txo;
2686 num_evts = events_get(eqo);
2688 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2689 be_process_tx(adapter, txo, i);
2691 if (be_lock_napi(eqo)) {
2692 /* This loop will iterate twice for EQ0 in which
2693 * completions of the last RXQ (default one) are also processed.
2694 * For other EQs the loop iterates only once
2696 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2697 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2698 max_work = max(work, max_work);
2700 be_unlock_napi(eqo);
2705 if (is_mcc_eqo(eqo))
2706 be_process_mcc(adapter);
2708 if (max_work < budget) {
2709 napi_complete(napi);
2710 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2712 /* As we'll continue in polling mode, count and clear events */
2713 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
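/* NAPI contract: completing with work < budget (the napi_complete()
 * branch above) re-arms the EQ so the next event raises an interrupt;
 * returning the full budget keeps the softirq polling, which is why
 * the else branch only counts and clears events without re-arming.
 */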
2718 #ifdef CONFIG_NET_RX_BUSY_POLL
2719 static int be_busy_poll(struct napi_struct *napi)
2721 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2722 struct be_adapter *adapter = eqo->adapter;
2723 struct be_rx_obj *rxo;
2726 if (!be_lock_busy_poll(eqo))
2727 return LL_FLUSH_BUSY;
2729 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2730 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2735 be_unlock_busy_poll(eqo);
2740 void be_detect_error(struct be_adapter *adapter)
2742 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2743 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2745 bool error_detected = false;
2746 struct device *dev = &adapter->pdev->dev;
2747 struct net_device *netdev = adapter->netdev;
2749 if (be_hw_error(adapter))
2752 if (lancer_chip(adapter)) {
2753 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2754 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2755 sliport_err1 = ioread32(adapter->db +
2756 SLIPORT_ERROR1_OFFSET);
2757 sliport_err2 = ioread32(adapter->db +
2758 SLIPORT_ERROR2_OFFSET);
2759 adapter->hw_error = true;
2760 /* Do not log error messages if it's a FW reset */
2761 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2762 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2763 dev_info(dev, "Firmware update in progress\n");
2765 error_detected = true;
2766 dev_err(dev, "Error detected in the card\n");
2767 dev_err(dev, "ERR: sliport status 0x%x\n",
2769 dev_err(dev, "ERR: sliport error1 0x%x\n",
2771 dev_err(dev, "ERR: sliport error2 0x%x\n",
2776 pci_read_config_dword(adapter->pdev,
2777 PCICFG_UE_STATUS_LOW, &ue_lo);
2778 pci_read_config_dword(adapter->pdev,
2779 PCICFG_UE_STATUS_HIGH, &ue_hi);
2780 pci_read_config_dword(adapter->pdev,
2781 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2782 pci_read_config_dword(adapter->pdev,
2783 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2785 ue_lo = (ue_lo & ~ue_lo_mask);
2786 ue_hi = (ue_hi & ~ue_hi_mask);
2788 /* On certain platforms BE hardware can indicate spurious UEs.
2789 * So hw_error is not set on UE detection here; in case of a real
2790 * UE, the HW is allowed to stop working completely on its own.
2793 if (ue_lo || ue_hi) {
2794 error_detected = true;
2796 "Unrecoverable Error detected in the adapter");
2797 dev_err(dev, "Please reboot server to recover");
2798 if (skyhawk_chip(adapter))
2799 adapter->hw_error = true;
2800 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2802 dev_err(dev, "UE: %s bit set\n",
2803 ue_status_low_desc[i]);
2805 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2807 dev_err(dev, "UE: %s bit set\n",
2808 ue_status_hi_desc[i]);
2813 netif_carrier_off(netdev);
2816 static void be_msix_disable(struct be_adapter *adapter)
2818 if (msix_enabled(adapter)) {
2819 pci_disable_msix(adapter->pdev);
2820 adapter->num_msix_vec = 0;
2821 adapter->num_msix_roce_vec = 0;
2825 static int be_msix_enable(struct be_adapter *adapter)
2828 struct device *dev = &adapter->pdev->dev;
2830 /* If RoCE is supported, program the max number of NIC vectors that
2831 * may be configured via set-channels, along with vectors needed for
2832 * RoCE. Else, just program the number we'll use initially.
2834 if (be_roce_supported(adapter))
2835 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2836 2 * num_online_cpus());
2838 num_vec = adapter->cfg_num_qs;
2840 for (i = 0; i < num_vec; i++)
2841 adapter->msix_entries[i].entry = i;
2843 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2844 MIN_MSIX_VECTORS, num_vec);
2848 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2849 adapter->num_msix_roce_vec = num_vec / 2;
2850 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2851 adapter->num_msix_roce_vec);
2854 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2856 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2857 adapter->num_msix_vec);
2861 dev_warn(dev, "MSIx enable failed\n");
2863 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2864 if (!be_physfn(adapter))
2869 static inline int be_msix_vec_get(struct be_adapter *adapter,
2870 struct be_eq_obj *eqo)
2872 return adapter->msix_entries[eqo->msix_idx].vector;
2875 static int be_msix_register(struct be_adapter *adapter)
2877 struct net_device *netdev = adapter->netdev;
2878 struct be_eq_obj *eqo;
2881 for_all_evt_queues(adapter, eqo, i) {
2882 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2883 vec = be_msix_vec_get(adapter, eqo);
2884 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2891 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2892 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2893 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2895 be_msix_disable(adapter);
2899 static int be_irq_register(struct be_adapter *adapter)
2901 struct net_device *netdev = adapter->netdev;
2904 if (msix_enabled(adapter)) {
2905 status = be_msix_register(adapter);
2908 /* INTx is not supported for VF */
2909 if (!be_physfn(adapter))
2913 /* INTx: only the first EQ is used */
2914 netdev->irq = adapter->pdev->irq;
2915 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2916 &adapter->eq_obj[0]);
2918 dev_err(&adapter->pdev->dev,
2919 "INTx request IRQ failed - err %d\n", status);
2923 adapter->isr_registered = true;
2927 static void be_irq_unregister(struct be_adapter *adapter)
2929 struct net_device *netdev = adapter->netdev;
2930 struct be_eq_obj *eqo;
2933 if (!adapter->isr_registered)
2937 if (!msix_enabled(adapter)) {
2938 free_irq(netdev->irq, &adapter->eq_obj[0]);
2943 for_all_evt_queues(adapter, eqo, i)
2944 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2947 adapter->isr_registered = false;
2950 static void be_rx_qs_destroy(struct be_adapter *adapter)
2952 struct be_queue_info *q;
2953 struct be_rx_obj *rxo;
2956 for_all_rx_queues(adapter, rxo, i) {
2959 be_cmd_rxq_destroy(adapter, q);
2960 be_rx_cq_clean(rxo);
2962 be_queue_free(adapter, q);
2966 static int be_close(struct net_device *netdev)
2968 struct be_adapter *adapter = netdev_priv(netdev);
2969 struct be_eq_obj *eqo;
2972 /* This protection is needed as be_close() may be called even when the
2973 * adapter is in a cleared state (after an EEH permanent failure)
2975 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2978 be_roce_dev_close(adapter);
2980 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2981 for_all_evt_queues(adapter, eqo, i) {
2982 napi_disable(&eqo->napi);
2983 be_disable_busy_poll(eqo);
2985 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2988 be_async_mcc_disable(adapter);
2990 /* Wait for all pending tx completions to arrive so that
2991 * all tx skbs are freed.
2993 netif_tx_disable(netdev);
2994 be_tx_compl_clean(adapter);
2996 be_rx_qs_destroy(adapter);
2997 be_clear_uc_list(adapter);
2999 for_all_evt_queues(adapter, eqo, i) {
3000 if (msix_enabled(adapter))
3001 synchronize_irq(be_msix_vec_get(adapter, eqo));
3003 synchronize_irq(netdev->irq);
3007 be_irq_unregister(adapter);
3012 static int be_rx_qs_create(struct be_adapter *adapter)
3014 struct rss_info *rss = &adapter->rss_info;
3015 u8 rss_key[RSS_HASH_KEY_LEN];
3016 struct be_rx_obj *rxo;
3019 for_all_rx_queues(adapter, rxo, i) {
3020 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3021 sizeof(struct be_eth_rx_d));
3026 /* The FW would like the default RXQ to be created first */
3027 rxo = default_rxo(adapter);
3028 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3029 adapter->if_handle, false, &rxo->rss_id);
3033 for_all_rss_queues(adapter, rxo, i) {
3034 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3035 rx_frag_size, adapter->if_handle,
3036 true, &rxo->rss_id);
3041 if (be_multi_rxq(adapter)) {
3042 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3043 j += adapter->num_rx_qs - 1) {
3044 for_all_rss_queues(adapter, rxo, i) {
3045 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3047 rss->rsstable[j + i] = rxo->rss_id;
3048 rss->rss_queue[j + i] = i;
3051 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3052 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3054 if (!BEx_chip(adapter))
3055 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3056 RSS_ENABLE_UDP_IPV6;
3058 /* Disable RSS if only the default RXQ is created */
3059 rss->rss_flags = RSS_ENABLE_NONE;
3062 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3063 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3066 rss->rss_flags = RSS_ENABLE_NONE;
3070 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3072 /* First time posting */
3073 for_all_rx_queues(adapter, rxo, i)
3074 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
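/* Editor's note: a minimal userspace sketch of the indirection-table
 * fill performed in be_rx_qs_create() above. Buckets are filled
 * round-robin with the rss_id of each RSS ring so hashed flows spread
 * evenly. EX_INDIR_TABLE_LEN is assumed to match the driver's
 * RSS_INDIR_TABLE_LEN (128); all ex_* names are illustrative.
 */
#include <stdint.h>

#define EX_INDIR_TABLE_LEN 128

static void ex_fill_rss_table(uint8_t table[EX_INDIR_TABLE_LEN],
			      const uint8_t *rss_ids, int num_rss_qs)
{
	int i, j;

	for (j = 0; j < EX_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < EX_INDIR_TABLE_LEN; i++)
			table[j + i] = rss_ids[i];	/* ring serving this bucket */
}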
3078 static int be_open(struct net_device *netdev)
3080 struct be_adapter *adapter = netdev_priv(netdev);
3081 struct be_eq_obj *eqo;
3082 struct be_rx_obj *rxo;
3083 struct be_tx_obj *txo;
3087 status = be_rx_qs_create(adapter);
3091 status = be_irq_register(adapter);
3095 for_all_rx_queues(adapter, rxo, i)
3096 be_cq_notify(adapter, rxo->cq.id, true, 0);
3098 for_all_tx_queues(adapter, txo, i)
3099 be_cq_notify(adapter, txo->cq.id, true, 0);
3101 be_async_mcc_enable(adapter);
3103 for_all_evt_queues(adapter, eqo, i) {
3104 napi_enable(&eqo->napi);
3105 be_enable_busy_poll(eqo);
3106 be_eq_notify(adapter, eqo->q.id, true, true, 0);
3108 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3110 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3112 be_link_status_update(adapter, link_status);
3114 netif_tx_start_all_queues(netdev);
3115 be_roce_dev_open(adapter);
3117 #ifdef CONFIG_BE2NET_VXLAN
3118 if (skyhawk_chip(adapter))
3119 vxlan_get_rx_port(netdev);
3124 be_close(adapter->netdev);
3128 static int be_setup_wol(struct be_adapter *adapter, bool enable)
3130 struct be_dma_mem cmd;
3134 memset(mac, 0, ETH_ALEN);
3136 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3137 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3143 status = pci_write_config_dword(adapter->pdev,
3144 PCICFG_PM_CONTROL_OFFSET,
3145 PCICFG_PM_CONTROL_MASK);
3147 dev_err(&adapter->pdev->dev,
3148 "Could not enable Wake-on-lan\n");
3149 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3153 status = be_cmd_enable_magic_wol(adapter,
3154 adapter->netdev->dev_addr,
3156 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3157 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3159 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3160 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3161 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3164 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3168 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3172 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3174 mac[5] = (u8)(addr & 0xFF);
3175 mac[4] = (u8)((addr >> 8) & 0xFF);
3176 mac[3] = (u8)((addr >> 16) & 0xFF);
3177 /* Use the OUI from the current MAC address */
3178 memcpy(mac, adapter->netdev->dev_addr, 3);
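/* Editor's note: a userspace sketch of the seed-MAC packing done by
 * be_vf_eth_addr_generate() above. jhash() is kernel-only, so any
 * 32-bit hash of the PF MAC stands in here; the OUI (first 3 octets)
 * of the PF MAC is preserved and the hash supplies the NIC-specific
 * octets. The ex_ name is illustrative.
 */
#include <stdint.h>
#include <string.h>

static void ex_seed_mac(const uint8_t pf_mac[6], uint32_t hash, uint8_t mac[6])
{
	memcpy(mac, pf_mac, 3);		/* keep the OUI */
	mac[3] = (hash >> 16) & 0xFF;
	mac[4] = (hash >> 8) & 0xFF;
	mac[5] = hash & 0xFF;
}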
3182 * Generate a seed MAC address from the PF MAC Address using jhash.
3183 * MAC Addresses for VFs are assigned incrementally starting from the seed.
3184 * These addresses are programmed in the ASIC by the PF and the VF driver
3185 * queries for the MAC address during its probe.
3187 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3192 struct be_vf_cfg *vf_cfg;
3194 be_vf_eth_addr_generate(adapter, mac);
3196 for_all_vfs(adapter, vf_cfg, vf) {
3197 if (BEx_chip(adapter))
3198 status = be_cmd_pmac_add(adapter, mac,
3200 &vf_cfg->pmac_id, vf + 1);
3202 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3206 dev_err(&adapter->pdev->dev,
3207 "Mac address assignment failed for VF %d\n",
3210 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3217 static int be_vfs_mac_query(struct be_adapter *adapter)
3221 struct be_vf_cfg *vf_cfg;
3223 for_all_vfs(adapter, vf_cfg, vf) {
3224 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3225 mac, vf_cfg->if_handle,
3229 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3234 static void be_vf_clear(struct be_adapter *adapter)
3236 struct be_vf_cfg *vf_cfg;
3239 if (pci_vfs_assigned(adapter->pdev)) {
3240 dev_warn(&adapter->pdev->dev,
3241 "VFs are assigned to VMs: not disabling VFs\n");
3245 pci_disable_sriov(adapter->pdev);
3247 for_all_vfs(adapter, vf_cfg, vf) {
3248 if (BEx_chip(adapter))
3249 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3250 vf_cfg->pmac_id, vf + 1);
3252 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3255 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3258 kfree(adapter->vf_cfg);
3259 adapter->num_vfs = 0;
3260 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3263 static void be_clear_queues(struct be_adapter *adapter)
3265 be_mcc_queues_destroy(adapter);
3266 be_rx_cqs_destroy(adapter);
3267 be_tx_queues_destroy(adapter);
3268 be_evt_queues_destroy(adapter);
3271 static void be_cancel_worker(struct be_adapter *adapter)
3273 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3274 cancel_delayed_work_sync(&adapter->work);
3275 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3279 static void be_mac_clear(struct be_adapter *adapter)
3281 if (adapter->pmac_id) {
3282 be_cmd_pmac_del(adapter, adapter->if_handle,
3283 adapter->pmac_id[0], 0);
3284 kfree(adapter->pmac_id);
3285 adapter->pmac_id = NULL;
3289 #ifdef CONFIG_BE2NET_VXLAN
3290 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3292 struct net_device *netdev = adapter->netdev;
3294 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3295 be_cmd_manage_iface(adapter, adapter->if_handle,
3296 OP_CONVERT_TUNNEL_TO_NORMAL);
3298 if (adapter->vxlan_port)
3299 be_cmd_set_vxlan_port(adapter, 0);
3301 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3302 adapter->vxlan_port = 0;
3304 netdev->hw_enc_features = 0;
3305 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3306 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3310 static int be_clear(struct be_adapter *adapter)
3312 be_cancel_worker(adapter);
3314 if (sriov_enabled(adapter))
3315 be_vf_clear(adapter);
3317 /* Re-configure FW to distribute resources evenly across max-supported
3318 * number of VFs, only when VFs are not already enabled.
3320 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3321 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3322 pci_sriov_get_totalvfs(adapter->pdev));
3324 #ifdef CONFIG_BE2NET_VXLAN
3325 be_disable_vxlan_offloads(adapter);
3327 /* delete the primary mac along with the uc-mac list */
3328 be_mac_clear(adapter);
3330 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3332 be_clear_queues(adapter);
3334 be_msix_disable(adapter);
3335 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3339 static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3340 u32 cap_flags, u32 vf)
3345 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3346 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3349 en_flags &= cap_flags;
3351 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3357 static int be_vfs_if_create(struct be_adapter *adapter)
3359 struct be_resources res = {0};
3360 struct be_vf_cfg *vf_cfg;
3364 /* If a FW profile exists, then cap_flags are updated */
3365 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3366 BE_IF_FLAGS_MULTICAST;
3368 for_all_vfs(adapter, vf_cfg, vf) {
3369 if (!BE3_chip(adapter)) {
3370 status = be_cmd_get_profile_config(adapter, &res,
3373 cap_flags = res.if_cap_flags;
3376 status = be_if_create(adapter, &vf_cfg->if_handle,
3385 static int be_vf_setup_init(struct be_adapter *adapter)
3387 struct be_vf_cfg *vf_cfg;
3390 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3392 if (!adapter->vf_cfg)
3395 for_all_vfs(adapter, vf_cfg, vf) {
3396 vf_cfg->if_handle = -1;
3397 vf_cfg->pmac_id = -1;
3402 static int be_vf_setup(struct be_adapter *adapter)
3404 struct device *dev = &adapter->pdev->dev;
3405 struct be_vf_cfg *vf_cfg;
3406 int status, old_vfs, vf;
3409 old_vfs = pci_num_vf(adapter->pdev);
3411 status = be_vf_setup_init(adapter);
3416 for_all_vfs(adapter, vf_cfg, vf) {
3417 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3422 status = be_vfs_mac_query(adapter);
3426 status = be_vfs_if_create(adapter);
3430 status = be_vf_eth_addr_config(adapter);
3435 for_all_vfs(adapter, vf_cfg, vf) {
3436 /* Allow VFs to program MAC/VLAN filters */
3437 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3438 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3439 status = be_cmd_set_fn_privileges(adapter,
3444 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3448 /* Allow full available bandwidth */
3450 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3453 be_cmd_enable_vf(adapter, vf + 1);
3454 be_cmd_set_logical_link_config(adapter,
3455 IFLA_VF_LINK_STATE_AUTO,
3461 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3463 dev_err(dev, "SRIOV enable failed\n");
3464 adapter->num_vfs = 0;
3469 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3472 dev_err(dev, "VF setup failed\n");
3473 be_vf_clear(adapter);
3477 /* Converting function_mode bits on BE3 to SH mc_type enums */
3479 static u8 be_convert_mc_type(u32 function_mode)
3481 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3483 else if (function_mode & QNQ_MODE)
3485 else if (function_mode & VNIC_MODE)
3487 else if (function_mode & UMC_ENABLED)
3493 /* On BE2/BE3, FW does not report the supported resource limits */
3494 static void BEx_get_resources(struct be_adapter *adapter,
3495 struct be_resources *res)
3497 bool use_sriov = adapter->num_vfs > 0;
3499 if (be_physfn(adapter))
3500 res->max_uc_mac = BE_UC_PMAC_COUNT;
3502 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3504 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3506 if (be_is_mc(adapter)) {
3507 /* Assuming that there are 4 channels per port
3508 * when multi-channel is enabled
3510 if (be_is_qnq_mode(adapter))
3511 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3513 /* In a non-qnq multichannel mode, the pvid
3514 * takes up one vlan entry
3516 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3518 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3521 res->max_mcast_mac = BE_MAX_MC;
3523 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3524 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3525 * *only* if it is RSS-capable.
3527 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3528 !be_physfn(adapter) || (be_is_mc(adapter) &&
3529 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
3531 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3532 struct be_resources super_nic_res = {0};
3534 /* On a SuperNIC profile, the driver needs to use the
3535 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3537 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3538 /* Some old versions of BE3 FW don't report max_tx_qs value */
3539 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3541 res->max_tx_qs = BE3_MAX_TX_QS;
3544 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3545 !use_sriov && be_physfn(adapter))
3546 res->max_rss_qs = (adapter->be3_native) ?
3547 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3548 res->max_rx_qs = res->max_rss_qs + 1;
3550 if (be_physfn(adapter))
3551 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
3552 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3554 res->max_evt_qs = 1;
3556 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3557 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3558 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
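/* Editor's note: a worked example of the VLAN budget above, assuming
 * BE_NUM_VLANS_SUPPORTED is 64 as defined in be.h: qnq multi-channel
 * gets 64 / 8 = 8 vlan filters, non-qnq multi-channel gets
 * 64 / 4 - 1 = 15 (the pvid consumes one entry), and a single-channel
 * function keeps all 64.
 */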
3561 static void be_setup_init(struct be_adapter *adapter)
3563 adapter->vlan_prio_bmap = 0xff;
3564 adapter->phy.link_speed = -1;
3565 adapter->if_handle = -1;
3566 adapter->be3_native = false;
3567 adapter->if_flags = 0;
3568 if (be_physfn(adapter))
3569 adapter->cmd_privileges = MAX_PRIVILEGES;
3571 adapter->cmd_privileges = MIN_PRIVILEGES;
3574 static int be_get_sriov_config(struct be_adapter *adapter)
3576 struct device *dev = &adapter->pdev->dev;
3577 struct be_resources res = {0};
3578 int max_vfs, old_vfs;
3580 /* Some old versions of BE3 FW don't report max_vfs value */
3581 be_cmd_get_profile_config(adapter, &res, 0);
3583 if (BE3_chip(adapter) && !res.max_vfs) {
3584 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3585 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3588 adapter->pool_res = res;
3590 if (!be_max_vfs(adapter)) {
3592 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
3593 adapter->num_vfs = 0;
3597 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3599 /* validate num_vfs module param */
3600 old_vfs = pci_num_vf(adapter->pdev);
3602 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3603 if (old_vfs != num_vfs)
3604 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3605 adapter->num_vfs = old_vfs;
3607 if (num_vfs > be_max_vfs(adapter)) {
3608 dev_info(dev, "Resources unavailable to init %d VFs\n",
3610 dev_info(dev, "Limiting to %d VFs\n",
3611 be_max_vfs(adapter));
3613 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3619 static int be_get_resources(struct be_adapter *adapter)
3621 struct device *dev = &adapter->pdev->dev;
3622 struct be_resources res = {0};
3625 if (BEx_chip(adapter)) {
3626 BEx_get_resources(adapter, &res);
3630 /* For Lancer, SH etc read per-function resource limits from FW.
3631 * GET_FUNC_CONFIG returns per function guaranteed limits.
3632 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3634 if (!BEx_chip(adapter)) {
3635 status = be_cmd_get_func_config(adapter, &res);
3639 /* If RoCE may be enabled stash away half the EQs for RoCE */
3640 if (be_roce_supported(adapter))
3641 res.max_evt_qs /= 2;
3645 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3646 be_max_txqs(adapter), be_max_rxqs(adapter),
3647 be_max_rss(adapter), be_max_eqs(adapter),
3648 be_max_vfs(adapter));
3649 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3650 be_max_uc(adapter), be_max_mc(adapter),
3651 be_max_vlans(adapter));
3656 static void be_sriov_config(struct be_adapter *adapter)
3658 struct device *dev = &adapter->pdev->dev;
3661 status = be_get_sriov_config(adapter);
3663 dev_err(dev, "Failed to query SR-IOV configuration\n");
3664 dev_err(dev, "SR-IOV cannot be enabled\n");
3668 /* When the HW is in SRIOV capable configuration, the PF-pool
3669 * resources are equally distributed across the max-number of
3670 * VFs. The user may request only a subset of the max-vfs to be
3671 * enabled. Based on num_vfs, redistribute the resources across
3672 * num_vfs so that each VF will have access to more
3673 * resources. This facility is not available in BE3 FW.
3674 * Also, this is done by FW in Lancer chip.
3676 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3677 status = be_cmd_set_sriov_config(adapter,
3681 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3685 static int be_get_config(struct be_adapter *adapter)
3690 status = be_cmd_query_fw_cfg(adapter);
3694 be_cmd_query_port_name(adapter);
3696 if (be_physfn(adapter)) {
3697 status = be_cmd_get_active_profile(adapter, &profile_id);
3699 dev_info(&adapter->pdev->dev,
3700 "Using profile 0x%x\n", profile_id);
3703 if (!BE2_chip(adapter) && be_physfn(adapter))
3704 be_sriov_config(adapter);
3706 status = be_get_resources(adapter);
3710 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3711 sizeof(*adapter->pmac_id), GFP_KERNEL);
3712 if (!adapter->pmac_id)
3715 /* Sanitize cfg_num_qs based on HW and platform limits */
3716 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3721 static int be_mac_setup(struct be_adapter *adapter)
3726 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3727 status = be_cmd_get_perm_mac(adapter, mac);
3731 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3732 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3734 /* Maybe the HW was reset; dev_addr must be re-programmed */
3735 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3738 /* For BE3-R VFs, the PF programs the initial MAC address */
3739 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3740 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3741 &adapter->pmac_id[0], 0);
3745 static void be_schedule_worker(struct be_adapter *adapter)
3747 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3748 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3751 static int be_setup_queues(struct be_adapter *adapter)
3753 struct net_device *netdev = adapter->netdev;
3756 status = be_evt_queues_create(adapter);
3760 status = be_tx_qs_create(adapter);
3764 status = be_rx_cqs_create(adapter);
3768 status = be_mcc_queues_create(adapter);
3772 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3776 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3782 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3786 int be_update_queues(struct be_adapter *adapter)
3788 struct net_device *netdev = adapter->netdev;
3791 if (netif_running(netdev))
3794 be_cancel_worker(adapter);
3796 /* If any vectors have been shared with RoCE we cannot re-program the MSI-X table */
3799 if (!adapter->num_msix_roce_vec)
3800 be_msix_disable(adapter);
3802 be_clear_queues(adapter);
3804 if (!msix_enabled(adapter)) {
3805 status = be_msix_enable(adapter);
3810 status = be_setup_queues(adapter);
3814 be_schedule_worker(adapter);
3816 if (netif_running(netdev))
3817 status = be_open(netdev);
3822 static inline int fw_major_num(const char *fw_ver)
3824 int fw_major = 0, i;
3826 i = sscanf(fw_ver, "%d.", &fw_major);
3833 static int be_setup(struct be_adapter *adapter)
3835 struct device *dev = &adapter->pdev->dev;
3838 be_setup_init(adapter);
3840 if (!lancer_chip(adapter))
3841 be_cmd_req_native_mode(adapter);
3843 status = be_get_config(adapter);
3847 status = be_msix_enable(adapter);
3851 status = be_if_create(adapter, &adapter->if_handle,
3852 be_if_cap_flags(adapter), 0);
3856 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3858 status = be_setup_queues(adapter);
3863 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3865 status = be_mac_setup(adapter);
3869 be_cmd_get_fw_ver(adapter);
3870 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
3872 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3873 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
3875 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3878 if (adapter->vlans_added)
3879 be_vid_config(adapter);
3881 be_set_rx_mode(adapter->netdev);
3883 be_cmd_get_acpi_wol_cap(adapter);
3885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3892 adapter->tx_fc, adapter->rx_fc);
3894 if (be_physfn(adapter))
3895 be_cmd_set_logical_link_config(adapter,
3896 IFLA_VF_LINK_STATE_AUTO, 0);
3898 if (adapter->num_vfs)
3899 be_vf_setup(adapter);
3901 status = be_cmd_get_phy_info(adapter);
3902 if (!status && be_pause_supported(adapter))
3903 adapter->phy.fc_autoneg = 1;
3905 be_schedule_worker(adapter);
3906 adapter->flags |= BE_FLAGS_SETUP_DONE;
3913 #ifdef CONFIG_NET_POLL_CONTROLLER
3914 static void be_netpoll(struct net_device *netdev)
3916 struct be_adapter *adapter = netdev_priv(netdev);
3917 struct be_eq_obj *eqo;
3920 for_all_evt_queues(adapter, eqo, i) {
3921 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3922 napi_schedule(&eqo->napi);
3927 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
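/* The cookie is matched as a raw 32-byte blob (NUL padding of each
 * 16-byte half included) by memcmp() in get_fsec_info() below; read
 * together, the halves spell "*** SE FLASH DIRECTORY *** ".
 */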
3929 static bool phy_flashing_required(struct be_adapter *adapter)
3931 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
3932 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3935 static bool is_comp_in_ufi(struct be_adapter *adapter,
3936 struct flash_section_info *fsec, int type)
3938 int i = 0, img_type = 0;
3939 struct flash_section_info_g2 *fsec_g2 = NULL;
3941 if (BE2_chip(adapter))
3942 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3944 for (i = 0; i < MAX_FLASH_COMP; i++) {
3946 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3948 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3950 if (img_type == type)
3957 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3959 const struct firmware *fw)
3961 struct flash_section_info *fsec = NULL;
3962 const u8 *p = fw->data;
3965 while (p < (fw->data + fw->size)) {
3966 fsec = (struct flash_section_info *)p;
3967 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3974 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3975 u32 img_offset, u32 img_size, int hdr_size,
3976 u16 img_optype, bool *crc_match)
3982 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3987 crc_offset = hdr_size + img_offset + img_size - 4;
3989 /* Skip flashing if the CRC of the flashed region matches */
3990 if (!memcmp(crc, p + crc_offset, 4))
3998 static int be_flash(struct be_adapter *adapter, const u8 *img,
3999 struct be_dma_mem *flash_cmd, int optype, int img_size,
4002 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
4003 struct be_cmd_write_flashrom *req = flash_cmd->va;
4006 while (total_bytes) {
4007 num_bytes = min_t(u32, 32 * 1024, total_bytes);
4009 total_bytes -= num_bytes;
4012 if (optype == OPTYPE_PHY_FW)
4013 flash_op = FLASHROM_OPER_PHY_FLASH;
4015 flash_op = FLASHROM_OPER_FLASH;
4017 if (optype == OPTYPE_PHY_FW)
4018 flash_op = FLASHROM_OPER_PHY_SAVE;
4020 flash_op = FLASHROM_OPER_SAVE;
4023 memcpy(req->data_buf, img, num_bytes);
4025 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4026 flash_op, img_offset +
4027 bytes_sent, num_bytes);
4028 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4029 optype == OPTYPE_PHY_FW)
4034 bytes_sent += num_bytes;
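/* Editor's note: the loop above pushes the image in 32KB pieces, each
 * copied into the data_buf embedded in a single write_flashrom command;
 * the chunk size is presumably bounded by that command's buffer rather
 * than by the flash part itself.
 */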
4039 /* For BE2, BE3 and BE3-R */
4040 static int be_flash_BEx(struct be_adapter *adapter,
4041 const struct firmware *fw,
4042 struct be_dma_mem *flash_cmd, int num_of_images)
4044 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
4045 struct device *dev = &adapter->pdev->dev;
4046 struct flash_section_info *fsec = NULL;
4047 int status, i, filehdr_size, num_comp;
4048 const struct flash_comp *pflashcomp;
4052 struct flash_comp gen3_flash_types[] = {
4053 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4054 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4055 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4056 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4057 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4058 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4059 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4060 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4061 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4062 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4063 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4064 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4065 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4066 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4067 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4068 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4069 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4070 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4071 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4072 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
4075 struct flash_comp gen2_flash_types[] = {
4076 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4077 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4078 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4079 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4080 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4081 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4082 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4083 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4084 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4085 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4086 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4087 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4088 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4089 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4090 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4091 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4094 if (BE3_chip(adapter)) {
4095 pflashcomp = gen3_flash_types;
4096 filehdr_size = sizeof(struct flash_file_hdr_g3);
4097 num_comp = ARRAY_SIZE(gen3_flash_types);
4099 pflashcomp = gen2_flash_types;
4100 filehdr_size = sizeof(struct flash_file_hdr_g2);
4101 num_comp = ARRAY_SIZE(gen2_flash_types);
4105 /* Get flash section info */
4106 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4108 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4111 for (i = 0; i < num_comp; i++) {
4112 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
4115 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4116 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4119 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4120 !phy_flashing_required(adapter))
4123 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4124 status = be_check_flash_crc(adapter, fw->data,
4125 pflashcomp[i].offset,
4129 OPTYPE_REDBOOT, &crc_match);
4132 "Could not get CRC for 0x%x region\n",
4133 pflashcomp[i].optype);
4141 p = fw->data + filehdr_size + pflashcomp[i].offset +
4143 if (p + pflashcomp[i].size > fw->data + fw->size)
4146 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
4147 pflashcomp[i].size, 0);
4149 dev_err(dev, "Flashing section type 0x%x failed\n",
4150 pflashcomp[i].img_type);
4157 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4159 u32 img_type = le32_to_cpu(fsec_entry.type);
4160 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4162 if (img_optype != 0xFFFF)
4166 case IMAGE_FIRMWARE_iSCSI:
4167 img_optype = OPTYPE_ISCSI_ACTIVE;
4169 case IMAGE_BOOT_CODE:
4170 img_optype = OPTYPE_REDBOOT;
4172 case IMAGE_OPTION_ROM_ISCSI:
4173 img_optype = OPTYPE_BIOS;
4175 case IMAGE_OPTION_ROM_PXE:
4176 img_optype = OPTYPE_PXE_BIOS;
4178 case IMAGE_OPTION_ROM_FCoE:
4179 img_optype = OPTYPE_FCOE_BIOS;
4181 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4182 img_optype = OPTYPE_ISCSI_BACKUP;
4185 img_optype = OPTYPE_NCSI_FW;
4187 case IMAGE_FLASHISM_JUMPVECTOR:
4188 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4190 case IMAGE_FIRMWARE_PHY:
4191 img_optype = OPTYPE_SH_PHY_FW;
4193 case IMAGE_REDBOOT_DIR:
4194 img_optype = OPTYPE_REDBOOT_DIR;
4196 case IMAGE_REDBOOT_CONFIG:
4197 img_optype = OPTYPE_REDBOOT_CONFIG;
4200 img_optype = OPTYPE_UFI_DIR;
4209 static int be_flash_skyhawk(struct be_adapter *adapter,
4210 const struct firmware *fw,
4211 struct be_dma_mem *flash_cmd, int num_of_images)
4213 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4214 bool crc_match, old_fw_img, flash_offset_support = true;
4215 struct device *dev = &adapter->pdev->dev;
4216 struct flash_section_info *fsec = NULL;
4217 u32 img_offset, img_size, img_type;
4218 u16 img_optype, flash_optype;
4219 int status, i, filehdr_size;
4222 filehdr_size = sizeof(struct flash_file_hdr_g3);
4223 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4225 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4230 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4231 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4232 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4233 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4234 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4235 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
4237 if (img_optype == 0xFFFF)
4240 if (flash_offset_support)
4241 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4243 flash_optype = img_optype;
4245 /* Don't bother verifying CRC if an old FW image is being flashed */
4251 status = be_check_flash_crc(adapter, fw->data, img_offset,
4252 img_size, filehdr_size +
4253 img_hdrs_size, flash_optype,
4255 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4256 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4257 /* The current FW image on the card does not support
4258 * OFFSET based flashing. Retry using older mechanism
4259 * of OPTYPE based flashing
4261 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4262 flash_offset_support = false;
4266 /* The current FW image on the card does not recognize
4267 * the new FLASH op_type. The FW download is partially
4268 * complete. Reboot the server now to enable FW image
4269 * to recognize the new FLASH op_type. To complete the
4270 * remaining process, download the same FW again after the reboot.
4273 dev_err(dev, "Flash incomplete. Reset the server\n");
4274 dev_err(dev, "Download FW image again after reset\n");
4276 } else if (status) {
4277 dev_err(dev, "Could not get CRC for 0x%x region\n",
4286 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4287 if (p + img_size > fw->data + fw->size)
4290 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4293 /* The current FW image on the card does not support OFFSET
4294 * based flashing. Retry using older mechanism of OPTYPE based
4297 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4298 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4299 flash_offset_support = false;
4303 /* For old FW images ignore ILLEGAL_FIELD error or errors on the UFI_DIR region
4307 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4308 (img_optype == OPTYPE_UFI_DIR &&
4309 base_status(status) == MCC_STATUS_FAILED))) {
4311 } else if (status) {
4312 dev_err(dev, "Flashing section type 0x%x failed\n",
4320 static int lancer_fw_download(struct be_adapter *adapter,
4321 const struct firmware *fw)
4323 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4324 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
4325 struct device *dev = &adapter->pdev->dev;
4326 struct be_dma_mem flash_cmd;
4327 const u8 *data_ptr = NULL;
4328 u8 *dest_image_ptr = NULL;
4329 size_t image_size = 0;
4331 u32 data_written = 0;
4337 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4338 dev_err(dev, "FW image size should be multiple of 4\n");
4342 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4343 + LANCER_FW_DOWNLOAD_CHUNK;
4344 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
4345 &flash_cmd.dma, GFP_KERNEL);
4349 dest_image_ptr = flash_cmd.va +
4350 sizeof(struct lancer_cmd_req_write_object);
4351 image_size = fw->size;
4352 data_ptr = fw->data;
4354 while (image_size) {
4355 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4357 /* Copy the image chunk content. */
4358 memcpy(dest_image_ptr, data_ptr, chunk_size);
4360 status = lancer_cmd_write_object(adapter, &flash_cmd,
4362 LANCER_FW_DOWNLOAD_LOCATION,
4363 &data_written, &change_status,
4368 offset += data_written;
4369 data_ptr += data_written;
4370 image_size -= data_written;
4374 /* Commit the FW written */
4375 status = lancer_cmd_write_object(adapter, &flash_cmd,
4377 LANCER_FW_DOWNLOAD_LOCATION,
4378 &data_written, &change_status,
4382 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4384 dev_err(dev, "Firmware load error\n");
4385 return be_cmd_status(status);
4388 dev_info(dev, "Firmware flashed successfully\n");
4390 if (change_status == LANCER_FW_RESET_NEEDED) {
4391 dev_info(dev, "Resetting adapter to activate new FW\n");
4392 status = lancer_physdev_ctrl(adapter,
4393 PHYSDEV_CONTROL_FW_RESET_MASK);
4395 dev_err(dev, "Adapter busy, could not reset FW\n");
4396 dev_err(dev, "Reboot server to activate new FW\n");
4398 } else if (change_status != LANCER_NO_RESET_NEEDED) {
4399 dev_info(dev, "Reboot server to activate new FW\n");
4409 #define SH_P2_UFI 11
4411 static int be_get_ufi_type(struct be_adapter *adapter,
4412 struct flash_file_hdr_g3 *fhdr)
4415 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4419 /* First letter of the build version is used to identify
4420 * which chip this image file is meant for.
4422 switch (fhdr->build[0]) {
4423 case BLD_STR_UFI_TYPE_SH:
4424 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4426 case BLD_STR_UFI_TYPE_BE3:
4427 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4429 case BLD_STR_UFI_TYPE_BE2:
4436 /* Check if the flash image file is compatible with the adapter being flashed.
4438 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
4439 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
4441 static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4442 struct flash_file_hdr_g3 *fhdr)
4444 int ufi_type = be_get_ufi_type(adapter, fhdr);
4448 return skyhawk_chip(adapter);
4450 return (skyhawk_chip(adapter) &&
4451 adapter->asic_rev < ASIC_REV_P2);
4453 return BE3_chip(adapter);
4455 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4457 return BE2_chip(adapter);
4463 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4465 struct device *dev = &adapter->pdev->dev;
4466 struct flash_file_hdr_g3 *fhdr3;
4467 struct image_hdr *img_hdr_ptr;
4468 int status = 0, i, num_imgs;
4469 struct be_dma_mem flash_cmd;
4471 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4472 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4473 dev_err(dev, "Flash image is not compatible with adapter\n");
4477 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4478 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4483 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4484 for (i = 0; i < num_imgs; i++) {
4485 img_hdr_ptr = (struct image_hdr *)(fw->data +
4486 (sizeof(struct flash_file_hdr_g3) +
4487 i * sizeof(struct image_hdr)));
4488 if (!BE2_chip(adapter) &&
4489 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4492 if (skyhawk_chip(adapter))
4493 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4496 status = be_flash_BEx(adapter, fw, &flash_cmd,
4500 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4502 dev_info(dev, "Firmware flashed successfully\n");
4507 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4509 const struct firmware *fw;
4512 if (!netif_running(adapter->netdev)) {
4513 dev_err(&adapter->pdev->dev,
4514 "Firmware load not allowed (interface is down)\n");
4518 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4522 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4524 if (lancer_chip(adapter))
4525 status = lancer_fw_download(adapter, fw);
4527 status = be_fw_download(adapter, fw);
4530 be_cmd_get_fw_ver(adapter);
4533 release_firmware(fw);
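/* Editor's note: be_load_fw() is normally reached from ethtool's
 * flash-device op (e.g. "ethtool -f <iface> <image>.ufi"); the file
 * name is resolved by request_firmware() against the kernel's firmware
 * search path.
 */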
4537 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4540 struct be_adapter *adapter = netdev_priv(dev);
4541 struct nlattr *attr, *br_spec;
4546 if (!sriov_enabled(adapter))
4549 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4553 nla_for_each_nested(attr, br_spec, rem) {
4554 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4557 if (nla_len(attr) < sizeof(mode))
4560 mode = nla_get_u16(attr);
4561 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4564 status = be_cmd_set_hsw_config(adapter, 0, 0,
4566 mode == BRIDGE_MODE_VEPA ?
4567 PORT_FWD_TYPE_VEPA :
4572 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4573 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4578 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4579 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4584 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4585 struct net_device *dev, u32 filter_mask)
4587 struct be_adapter *adapter = netdev_priv(dev);
4591 if (!sriov_enabled(adapter))
4594 /* BE and Lancer chips support VEB mode only */
4595 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4596 hsw_mode = PORT_FWD_TYPE_VEB;
4598 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4599 adapter->if_handle, &hsw_mode);
4604 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4605 hsw_mode == PORT_FWD_TYPE_VEPA ?
4606 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4610 #ifdef CONFIG_BE2NET_VXLAN
4611 /* VxLAN offload Notes:
4613 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4614 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4615 * is expected to work across all types of IP tunnels once exported. Skyhawk
4616 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4617 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4618 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4619 * those other tunnels are unexported on the fly through ndo_features_check().
4621 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4622 * adds more than one port, disable offloads and don't re-enable them
4623 * until after all the tunnels are removed.
4625 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4628 struct be_adapter *adapter = netdev_priv(netdev);
4629 struct device *dev = &adapter->pdev->dev;
4632 if (lancer_chip(adapter) || BEx_chip(adapter))
4635 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4637 "Only one UDP port supported for VxLAN offloads\n");
4638 dev_info(dev, "Disabling VxLAN offloads\n");
4639 adapter->vxlan_port_count++;
4643 if (adapter->vxlan_port_count++ >= 1)
4646 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4647 OP_CONVERT_NORMAL_TO_TUNNEL);
4649 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4653 status = be_cmd_set_vxlan_port(adapter, port);
4655 dev_warn(dev, "Failed to add VxLAN port\n");
4658 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4659 adapter->vxlan_port = port;
4661 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4662 NETIF_F_TSO | NETIF_F_TSO6 |
4663 NETIF_F_GSO_UDP_TUNNEL;
4664 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4665 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
4667 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4671 be_disable_vxlan_offloads(adapter);
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

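/* Note: the busy-poll and VxLAN hooks below are present only when the
 * corresponding kernel config options are enabled at build time.
 */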
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

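/* Doorbell BAR selection: Lancer chips and virtual functions expose the
 * doorbell registers in BAR 0, while BE/Skyhawk physical functions use
 * BAR 4.
 */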
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

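/* The mailbox buffer is allocated 16 bytes larger than required so that
 * the address handed to the hardware can be rounded up to a 16-byte
 * boundary with PTR_ALIGN() below.
 */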
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

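/* Lancer error recovery: wait for the chip to become ready again, tear
 * the function down (be_close()/be_clear()), clear the recorded error
 * state and rebuild it with be_setup()/be_open().
 */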
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

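/* Periodic housekeeping, re-armed every second: reaps MCC completions
 * while the interface is down, refreshes stats and die temperature,
 * replenishes RX queues that starved on allocation failures and updates
 * the adaptive EQ delays.
 */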
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

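/* PCI probe: bring-up order is pci_enable_device() -> BAR/mailbox setup
 * (be_ctrl_init()) -> FW ready-wait and optional function reset ->
 * be_setup() -> register_netdev(). The labels at the end unwind these
 * steps in reverse order on failure.
 */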
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

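/* Legacy PCI power-management hooks: suspend quiesces the function with
 * be_close()/be_clear() (arming wake-on-LAN first if enabled) and resume
 * redoes the minimal FW handshake before rebuilding it via be_setup().
 */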
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

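/* PCI error recovery (AER/EEH): error_detected() quiesces the function,
 * slot_reset() re-enables the device and waits for FW readiness, and
 * resume() rebuilds the function and restarts traffic.
 */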
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

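/* Module entry points. A usage sketch (parameter values illustrative
 * only): "modprobe be2net rx_frag_size=4096 num_vfs=2". rx_frag_size is
 * validated below and falls back to 2048 with a warning.
 */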
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);