1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
33 #include "i40e_diag.h"
34 #include <net/udp_tunnel.h>
36 const char i40e_driver_name[] = "i40e";
37 static const char i40e_driver_string[] =
38 "Intel(R) Ethernet Connection XL710 Network Driver";
42 #define DRV_VERSION_MAJOR 2
43 #define DRV_VERSION_MINOR 1
44 #define DRV_VERSION_BUILD 7
45 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
46 __stringify(DRV_VERSION_MINOR) "." \
47 __stringify(DRV_VERSION_BUILD) DRV_KERN
48 const char i40e_driver_version_str[] = DRV_VERSION;
49 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
51 /* forward declarations */
52 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
53 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
54 static int i40e_add_vsi(struct i40e_vsi *vsi);
55 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
56 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
57 static int i40e_setup_misc_vector(struct i40e_pf *pf);
58 static void i40e_determine_queue_usage(struct i40e_pf *pf);
59 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
60 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
61 static int i40e_reset(struct i40e_pf *pf);
62 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
63 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
64 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
66 /* i40e_pci_tbl - PCI Device ID Table
68 * Last entry must be all 0s
70 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
71 * Class, Class Mask, private data (not used) }
73 static const struct pci_device_id i40e_pci_tbl[] = {
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
93 /* required last entry */
96 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
98 #define I40E_MAX_VF_COUNT 128
99 static int debug = -1;
100 module_param(debug, uint, 0);
101 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
103 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
104 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
105 MODULE_LICENSE("GPL");
106 MODULE_VERSION(DRV_VERSION);
108 static struct workqueue_struct *i40e_wq;
111 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
112 * @hw: pointer to the HW structure
113 * @mem: ptr to mem struct to fill out
114 * @size: size of memory requested
115 * @alignment: what to align the allocation to
117 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
118 u64 size, u32 alignment)
120 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
122 mem->size = ALIGN(size, alignment);
123 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
124 &mem->pa, GFP_KERNEL);
132 * i40e_free_dma_mem_d - OS specific memory free for shared code
133 * @hw: pointer to the HW structure
134 * @mem: ptr to mem struct to free
136 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
138 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
140 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
149 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
150 * @hw: pointer to the HW structure
151 * @mem: ptr to mem struct to fill out
152 * @size: size of memory requested
154 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
158 mem->va = kzalloc(size, GFP_KERNEL);
167 * i40e_free_virt_mem_d - OS specific memory free for shared code
168 * @hw: pointer to the HW structure
169 * @mem: ptr to mem struct to free
171 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
173 /* it's ok to kfree a NULL pointer */
182 * i40e_get_lump - find a lump of free generic resource
183 * @pf: board private structure
184 * @pile: the pile of resource to search
185 * @needed: the number of items needed
186 * @id: an owner id to stick on the items assigned
188 * Returns the base item index of the lump, or negative for error
190 * The search_hint trick and the lack of advanced fit-finding only work
191 * because the lump requests are highly likely to all be the same size.
192 * Linear search time and any fragmentation should be minimal.
194 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
200 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
201 dev_info(&pf->pdev->dev,
202 "param err: pile=%p needed=%d id=0x%04x\n",
207 /* start the linear search with an imperfect hint */
208 i = pile->search_hint;
209 while (i < pile->num_entries) {
210 /* skip already allocated entries */
211 if (pile->list[i] & I40E_PILE_VALID_BIT) {
216 /* do we have enough in this lump? */
217 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
218 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
223 /* there was enough, so assign it to the requestor */
224 for (j = 0; j < needed; j++)
225 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
227 pile->search_hint = i + j;
231 /* not enough, so skip over it and continue looking */
239 * i40e_put_lump - return a lump of generic resource
240 * @pile: the pile of resource to search
241 * @index: the base item index
242 * @id: the owner id of the items assigned
244 * Returns the count of items in the lump
246 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
248 int valid_id = (id | I40E_PILE_VALID_BIT);
252 if (!pile || index >= pile->num_entries)
256 i < pile->num_entries && pile->list[i] == valid_id;
262 if (count && index < pile->search_hint)
263 pile->search_hint = index;
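/* Usage sketch (illustrative, not part of the original flow shown here):
 * a VSI typically reserves its queue range with
 *	base = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 * and hands it back with
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 */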
269 * i40e_find_vsi_from_id - searches for the vsi with the given id
270 * @pf: the PF structure to search
271 * @id: ID of the VSI to find
273 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
277 for (i = 0; i < pf->num_alloc_vsi; i++)
278 if (pf->vsi[i] && (pf->vsi[i]->id == id))
285 * i40e_service_event_schedule - Schedule the service task to wake up
286 * @pf: board private structure
288 * If not already scheduled, this puts the task into the work queue
290 void i40e_service_event_schedule(struct i40e_pf *pf)
292 if (!test_bit(__I40E_DOWN, &pf->state) &&
293 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
294 queue_work(i40e_wq, &pf->service_task);
298 * i40e_tx_timeout - Respond to a Tx Hang
299 * @netdev: network interface device structure
301 * If any port has noticed a Tx timeout, it is likely that the whole
302 * device is munged, not just the one netdev port, so go for the full reset.
305 static void i40e_tx_timeout(struct net_device *netdev)
307 struct i40e_netdev_priv *np = netdev_priv(netdev);
308 struct i40e_vsi *vsi = np->vsi;
309 struct i40e_pf *pf = vsi->back;
310 struct i40e_ring *tx_ring = NULL;
311 unsigned int i, hung_queue = 0;
314 pf->tx_timeout_count++;
316 /* find the stopped queue the same way the stack does */
317 for (i = 0; i < netdev->num_tx_queues; i++) {
318 struct netdev_queue *q;
319 unsigned long trans_start;
321 q = netdev_get_tx_queue(netdev, i);
322 trans_start = q->trans_start;
323 if (netif_xmit_stopped(q) &&
325 (trans_start + netdev->watchdog_timeo))) {
331 if (i == netdev->num_tx_queues) {
332 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
334 /* now that we have an index, find the tx_ring struct */
335 for (i = 0; i < vsi->num_queue_pairs; i++) {
336 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
338 vsi->tx_rings[i]->queue_index) {
339 tx_ring = vsi->tx_rings[i];
346 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
347 pf->tx_timeout_recovery_level = 1; /* reset after some time */
348 else if (time_before(jiffies,
349 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
350 return; /* don't do any new action before the next timeout */
353 head = i40e_get_head(tx_ring);
354 /* Read interrupt register */
355 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
357 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
358 tx_ring->vsi->base_vector - 1));
360 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
362 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
363 vsi->seid, hung_queue, tx_ring->next_to_clean,
364 head, tx_ring->next_to_use,
365 readl(tx_ring->tail), val);
368 pf->tx_timeout_last_recovery = jiffies;
369 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
370 pf->tx_timeout_recovery_level, hung_queue);
372 switch (pf->tx_timeout_recovery_level) {
374 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
377 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
380 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
383 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
387 i40e_service_event_schedule(pf);
388 pf->tx_timeout_recovery_level++;
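/* The recovery level escalates with each timeout inside the recovery
 * window: level 1 requests a PF reset, level 2 a CORE reset and level 3 a
 * GLOBAL reset; anything beyond that is reported as unsuccessful. The
 * requested reset itself is carried out by the service task scheduled
 * above.
 */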
392 * i40e_get_vsi_stats_struct - Get System Network Statistics
393 * @vsi: the VSI we care about
395 * Returns the address of the device statistics structure.
396 * The statistics are actually updated from the service task.
398 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
400 return &vsi->net_stats;
404 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
405 * @netdev: network interface device structure
407 * Returns the address of the device statistics structure.
408 * The statistics are actually updated from the service task.
410 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
411 struct rtnl_link_stats64 *stats)
413 struct i40e_netdev_priv *np = netdev_priv(netdev);
414 struct i40e_ring *tx_ring, *rx_ring;
415 struct i40e_vsi *vsi = np->vsi;
416 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
419 if (test_bit(__I40E_DOWN, &vsi->state))
426 for (i = 0; i < vsi->num_queue_pairs; i++) {
430 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
435 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
436 packets = tx_ring->stats.packets;
437 bytes = tx_ring->stats.bytes;
438 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
440 stats->tx_packets += packets;
441 stats->tx_bytes += bytes;
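/* The Rx ring lives in the same allocation, immediately after its paired
 * Tx ring, so it can be reached by pointer arithmetic from tx_ring.
 */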
442 rx_ring = &tx_ring[1];
445 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
446 packets = rx_ring->stats.packets;
447 bytes = rx_ring->stats.bytes;
448 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
450 stats->rx_packets += packets;
451 stats->rx_bytes += bytes;
455 /* following stats updated by i40e_watchdog_subtask() */
456 stats->multicast = vsi_stats->multicast;
457 stats->tx_errors = vsi_stats->tx_errors;
458 stats->tx_dropped = vsi_stats->tx_dropped;
459 stats->rx_errors = vsi_stats->rx_errors;
460 stats->rx_dropped = vsi_stats->rx_dropped;
461 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
462 stats->rx_length_errors = vsi_stats->rx_length_errors;
466 * i40e_vsi_reset_stats - Resets all stats of the given vsi
467 * @vsi: the VSI to have its stats reset
469 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
471 struct rtnl_link_stats64 *ns;
477 ns = i40e_get_vsi_stats_struct(vsi);
478 memset(ns, 0, sizeof(*ns));
479 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
480 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
481 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
482 if (vsi->rx_rings && vsi->rx_rings[0]) {
483 for (i = 0; i < vsi->num_queue_pairs; i++) {
484 memset(&vsi->rx_rings[i]->stats, 0,
485 sizeof(vsi->rx_rings[i]->stats));
486 memset(&vsi->rx_rings[i]->rx_stats, 0,
487 sizeof(vsi->rx_rings[i]->rx_stats));
488 memset(&vsi->tx_rings[i]->stats, 0,
489 sizeof(vsi->tx_rings[i]->stats));
490 memset(&vsi->tx_rings[i]->tx_stats, 0,
491 sizeof(vsi->tx_rings[i]->tx_stats));
494 vsi->stat_offsets_loaded = false;
498 * i40e_pf_reset_stats - Reset all of the stats for the given PF
499 * @pf: the PF to be reset
501 void i40e_pf_reset_stats(struct i40e_pf *pf)
505 memset(&pf->stats, 0, sizeof(pf->stats));
506 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
507 pf->stat_offsets_loaded = false;
509 for (i = 0; i < I40E_MAX_VEB; i++) {
511 memset(&pf->veb[i]->stats, 0,
512 sizeof(pf->veb[i]->stats));
513 memset(&pf->veb[i]->stats_offsets, 0,
514 sizeof(pf->veb[i]->stats_offsets));
515 pf->veb[i]->stat_offsets_loaded = false;
518 pf->hw_csum_rx_error = 0;
522 * i40e_stat_update48 - read and update a 48 bit stat from the chip
523 * @hw: ptr to the hardware info
524 * @hireg: the high 32 bit reg to read
525 * @loreg: the low 32 bit reg to read
526 * @offset_loaded: has the initial offset been loaded yet
527 * @offset: ptr to current offset value
528 * @stat: ptr to the stat
530 * Since the device stats are not reset at PFReset, they likely will not
531 * be zeroed when the driver starts. We'll save the first values read
532 * and use them as offsets to be subtracted from the raw values in order
533 * to report stats that count from zero. In the process, we also manage
534 * the potential roll-over.
536 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
537 bool offset_loaded, u64 *offset, u64 *stat)
541 if (hw->device_id == I40E_DEV_ID_QEMU) {
542 new_data = rd32(hw, loreg);
543 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
545 new_data = rd64(hw, loreg);
549 if (likely(new_data >= *offset))
550 *stat = new_data - *offset;
552 *stat = (new_data + BIT_ULL(48)) - *offset;
553 *stat &= 0xFFFFFFFFFFFFULL;
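/* Worked example of the rollover branch above (illustrative values): with
 * *offset == 0xFFFFFF000000 and new_data == 0x000000100000 after a wrap,
 *	*stat = (new_data + BIT_ULL(48)) - *offset = 0x000001100000
 * which, masked to 48 bits, is the true delta since the offset was taken.
 */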
557 * i40e_stat_update32 - read and update a 32 bit stat from the chip
558 * @hw: ptr to the hardware info
559 * @reg: the hw reg to read
560 * @offset_loaded: has the initial offset been loaded yet
561 * @offset: ptr to current offset value
562 * @stat: ptr to the stat
564 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
565 bool offset_loaded, u64 *offset, u64 *stat)
569 new_data = rd32(hw, reg);
572 if (likely(new_data >= *offset))
573 *stat = (u32)(new_data - *offset);
575 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
579 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
580 * @vsi: the VSI to be updated
582 void i40e_update_eth_stats(struct i40e_vsi *vsi)
584 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
585 struct i40e_pf *pf = vsi->back;
586 struct i40e_hw *hw = &pf->hw;
587 struct i40e_eth_stats *oes;
588 struct i40e_eth_stats *es; /* device's eth stats */
590 es = &vsi->eth_stats;
591 oes = &vsi->eth_stats_offsets;
593 /* Gather up the stats that the hw collects */
594 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
595 vsi->stat_offsets_loaded,
596 &oes->tx_errors, &es->tx_errors);
597 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
598 vsi->stat_offsets_loaded,
599 &oes->rx_discards, &es->rx_discards);
600 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
601 vsi->stat_offsets_loaded,
602 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
603 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
604 vsi->stat_offsets_loaded,
605 &oes->tx_errors, &es->tx_errors);
607 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
608 I40E_GLV_GORCL(stat_idx),
609 vsi->stat_offsets_loaded,
610 &oes->rx_bytes, &es->rx_bytes);
611 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
612 I40E_GLV_UPRCL(stat_idx),
613 vsi->stat_offsets_loaded,
614 &oes->rx_unicast, &es->rx_unicast);
615 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
616 I40E_GLV_MPRCL(stat_idx),
617 vsi->stat_offsets_loaded,
618 &oes->rx_multicast, &es->rx_multicast);
619 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
620 I40E_GLV_BPRCL(stat_idx),
621 vsi->stat_offsets_loaded,
622 &oes->rx_broadcast, &es->rx_broadcast);
624 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
625 I40E_GLV_GOTCL(stat_idx),
626 vsi->stat_offsets_loaded,
627 &oes->tx_bytes, &es->tx_bytes);
628 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
629 I40E_GLV_UPTCL(stat_idx),
630 vsi->stat_offsets_loaded,
631 &oes->tx_unicast, &es->tx_unicast);
632 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
633 I40E_GLV_MPTCL(stat_idx),
634 vsi->stat_offsets_loaded,
635 &oes->tx_multicast, &es->tx_multicast);
636 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
637 I40E_GLV_BPTCL(stat_idx),
638 vsi->stat_offsets_loaded,
639 &oes->tx_broadcast, &es->tx_broadcast);
640 vsi->stat_offsets_loaded = true;
644 * i40e_update_veb_stats - Update Switch component statistics
645 * @veb: the VEB being updated
647 static void i40e_update_veb_stats(struct i40e_veb *veb)
649 struct i40e_pf *pf = veb->pf;
650 struct i40e_hw *hw = &pf->hw;
651 struct i40e_eth_stats *oes;
652 struct i40e_eth_stats *es; /* device's eth stats */
653 struct i40e_veb_tc_stats *veb_oes;
654 struct i40e_veb_tc_stats *veb_es;
657 idx = veb->stats_idx;
659 oes = &veb->stats_offsets;
660 veb_es = &veb->tc_stats;
661 veb_oes = &veb->tc_stats_offsets;
663 /* Gather up the stats that the hw collects */
664 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
665 veb->stat_offsets_loaded,
666 &oes->tx_discards, &es->tx_discards);
667 if (hw->revision_id > 0)
668 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
669 veb->stat_offsets_loaded,
670 &oes->rx_unknown_protocol,
671 &es->rx_unknown_protocol);
672 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
673 veb->stat_offsets_loaded,
674 &oes->rx_bytes, &es->rx_bytes);
675 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
676 veb->stat_offsets_loaded,
677 &oes->rx_unicast, &es->rx_unicast);
678 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
679 veb->stat_offsets_loaded,
680 &oes->rx_multicast, &es->rx_multicast);
681 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
682 veb->stat_offsets_loaded,
683 &oes->rx_broadcast, &es->rx_broadcast);
685 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
686 veb->stat_offsets_loaded,
687 &oes->tx_bytes, &es->tx_bytes);
688 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
689 veb->stat_offsets_loaded,
690 &oes->tx_unicast, &es->tx_unicast);
691 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
692 veb->stat_offsets_loaded,
693 &oes->tx_multicast, &es->tx_multicast);
694 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
695 veb->stat_offsets_loaded,
696 &oes->tx_broadcast, &es->tx_broadcast);
697 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
698 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
699 I40E_GLVEBTC_RPCL(i, idx),
700 veb->stat_offsets_loaded,
701 &veb_oes->tc_rx_packets[i],
702 &veb_es->tc_rx_packets[i]);
703 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
704 I40E_GLVEBTC_RBCL(i, idx),
705 veb->stat_offsets_loaded,
706 &veb_oes->tc_rx_bytes[i],
707 &veb_es->tc_rx_bytes[i]);
708 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
709 I40E_GLVEBTC_TPCL(i, idx),
710 veb->stat_offsets_loaded,
711 &veb_oes->tc_tx_packets[i],
712 &veb_es->tc_tx_packets[i]);
713 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
714 I40E_GLVEBTC_TBCL(i, idx),
715 veb->stat_offsets_loaded,
716 &veb_oes->tc_tx_bytes[i],
717 &veb_es->tc_tx_bytes[i]);
719 veb->stat_offsets_loaded = true;
723 * i40e_update_vsi_stats - Update the vsi statistics counters.
724 * @vsi: the VSI to be updated
726 * There are a few instances where we store the same stat in a
727 * couple of different structs. This is partly because we have
728 * the netdev stats that need to be filled out, which is slightly
729 * different from the "eth_stats" defined by the chip and used in
730 * VF communications. We sort it out here.
732 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
734 struct i40e_pf *pf = vsi->back;
735 struct rtnl_link_stats64 *ons;
736 struct rtnl_link_stats64 *ns; /* netdev stats */
737 struct i40e_eth_stats *oes;
738 struct i40e_eth_stats *es; /* device's eth stats */
739 u32 tx_restart, tx_busy;
750 if (test_bit(__I40E_DOWN, &vsi->state) ||
751 test_bit(__I40E_CONFIG_BUSY, &pf->state))
754 ns = i40e_get_vsi_stats_struct(vsi);
755 ons = &vsi->net_stats_offsets;
756 es = &vsi->eth_stats;
757 oes = &vsi->eth_stats_offsets;
759 /* Gather up the netdev and vsi stats that the driver collects
760 * on the fly during packet processing
764 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
768 for (q = 0; q < vsi->num_queue_pairs; q++) {
770 p = ACCESS_ONCE(vsi->tx_rings[q]);
773 start = u64_stats_fetch_begin_irq(&p->syncp);
774 packets = p->stats.packets;
775 bytes = p->stats.bytes;
776 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
779 tx_restart += p->tx_stats.restart_queue;
780 tx_busy += p->tx_stats.tx_busy;
781 tx_linearize += p->tx_stats.tx_linearize;
782 tx_force_wb += p->tx_stats.tx_force_wb;
784 /* Rx queue is part of the same block as Tx queue */
787 start = u64_stats_fetch_begin_irq(&p->syncp);
788 packets = p->stats.packets;
789 bytes = p->stats.bytes;
790 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
793 rx_buf += p->rx_stats.alloc_buff_failed;
794 rx_page += p->rx_stats.alloc_page_failed;
797 vsi->tx_restart = tx_restart;
798 vsi->tx_busy = tx_busy;
799 vsi->tx_linearize = tx_linearize;
800 vsi->tx_force_wb = tx_force_wb;
801 vsi->rx_page_failed = rx_page;
802 vsi->rx_buf_failed = rx_buf;
804 ns->rx_packets = rx_p;
806 ns->tx_packets = tx_p;
809 /* update netdev stats from eth stats */
810 i40e_update_eth_stats(vsi);
811 ons->tx_errors = oes->tx_errors;
812 ns->tx_errors = es->tx_errors;
813 ons->multicast = oes->rx_multicast;
814 ns->multicast = es->rx_multicast;
815 ons->rx_dropped = oes->rx_discards;
816 ns->rx_dropped = es->rx_discards;
817 ons->tx_dropped = oes->tx_discards;
818 ns->tx_dropped = es->tx_discards;
820 /* pull in a couple PF stats if this is the main vsi */
821 if (vsi == pf->vsi[pf->lan_vsi]) {
822 ns->rx_crc_errors = pf->stats.crc_errors;
823 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
824 ns->rx_length_errors = pf->stats.rx_length_errors;
829 * i40e_update_pf_stats - Update the PF statistics counters.
830 * @pf: the PF to be updated
832 static void i40e_update_pf_stats(struct i40e_pf *pf)
834 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
835 struct i40e_hw_port_stats *nsd = &pf->stats;
836 struct i40e_hw *hw = &pf->hw;
840 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
841 I40E_GLPRT_GORCL(hw->port),
842 pf->stat_offsets_loaded,
843 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
844 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
845 I40E_GLPRT_GOTCL(hw->port),
846 pf->stat_offsets_loaded,
847 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
848 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
849 pf->stat_offsets_loaded,
850 &osd->eth.rx_discards,
851 &nsd->eth.rx_discards);
852 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
853 I40E_GLPRT_UPRCL(hw->port),
854 pf->stat_offsets_loaded,
855 &osd->eth.rx_unicast,
856 &nsd->eth.rx_unicast);
857 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
858 I40E_GLPRT_MPRCL(hw->port),
859 pf->stat_offsets_loaded,
860 &osd->eth.rx_multicast,
861 &nsd->eth.rx_multicast);
862 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
863 I40E_GLPRT_BPRCL(hw->port),
864 pf->stat_offsets_loaded,
865 &osd->eth.rx_broadcast,
866 &nsd->eth.rx_broadcast);
867 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
868 I40E_GLPRT_UPTCL(hw->port),
869 pf->stat_offsets_loaded,
870 &osd->eth.tx_unicast,
871 &nsd->eth.tx_unicast);
872 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
873 I40E_GLPRT_MPTCL(hw->port),
874 pf->stat_offsets_loaded,
875 &osd->eth.tx_multicast,
876 &nsd->eth.tx_multicast);
877 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
878 I40E_GLPRT_BPTCL(hw->port),
879 pf->stat_offsets_loaded,
880 &osd->eth.tx_broadcast,
881 &nsd->eth.tx_broadcast);
883 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
884 pf->stat_offsets_loaded,
885 &osd->tx_dropped_link_down,
886 &nsd->tx_dropped_link_down);
888 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
889 pf->stat_offsets_loaded,
890 &osd->crc_errors, &nsd->crc_errors);
892 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
893 pf->stat_offsets_loaded,
894 &osd->illegal_bytes, &nsd->illegal_bytes);
896 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->mac_local_faults,
899 &nsd->mac_local_faults);
900 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
901 pf->stat_offsets_loaded,
902 &osd->mac_remote_faults,
903 &nsd->mac_remote_faults);
905 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
906 pf->stat_offsets_loaded,
907 &osd->rx_length_errors,
908 &nsd->rx_length_errors);
910 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->link_xon_rx, &nsd->link_xon_rx);
913 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
914 pf->stat_offsets_loaded,
915 &osd->link_xon_tx, &nsd->link_xon_tx);
916 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->link_xoff_rx, &nsd->link_xoff_rx);
919 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
920 pf->stat_offsets_loaded,
921 &osd->link_xoff_tx, &nsd->link_xoff_tx);
923 for (i = 0; i < 8; i++) {
924 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
925 pf->stat_offsets_loaded,
926 &osd->priority_xoff_rx[i],
927 &nsd->priority_xoff_rx[i]);
928 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
929 pf->stat_offsets_loaded,
930 &osd->priority_xon_rx[i],
931 &nsd->priority_xon_rx[i]);
932 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
933 pf->stat_offsets_loaded,
934 &osd->priority_xon_tx[i],
935 &nsd->priority_xon_tx[i]);
936 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
937 pf->stat_offsets_loaded,
938 &osd->priority_xoff_tx[i],
939 &nsd->priority_xoff_tx[i]);
940 i40e_stat_update32(hw,
941 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
942 pf->stat_offsets_loaded,
943 &osd->priority_xon_2_xoff[i],
944 &nsd->priority_xon_2_xoff[i]);
947 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
948 I40E_GLPRT_PRC64L(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->rx_size_64, &nsd->rx_size_64);
951 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
952 I40E_GLPRT_PRC127L(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->rx_size_127, &nsd->rx_size_127);
955 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
956 I40E_GLPRT_PRC255L(hw->port),
957 pf->stat_offsets_loaded,
958 &osd->rx_size_255, &nsd->rx_size_255);
959 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
960 I40E_GLPRT_PRC511L(hw->port),
961 pf->stat_offsets_loaded,
962 &osd->rx_size_511, &nsd->rx_size_511);
963 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
964 I40E_GLPRT_PRC1023L(hw->port),
965 pf->stat_offsets_loaded,
966 &osd->rx_size_1023, &nsd->rx_size_1023);
967 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
968 I40E_GLPRT_PRC1522L(hw->port),
969 pf->stat_offsets_loaded,
970 &osd->rx_size_1522, &nsd->rx_size_1522);
971 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
972 I40E_GLPRT_PRC9522L(hw->port),
973 pf->stat_offsets_loaded,
974 &osd->rx_size_big, &nsd->rx_size_big);
976 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
977 I40E_GLPRT_PTC64L(hw->port),
978 pf->stat_offsets_loaded,
979 &osd->tx_size_64, &nsd->tx_size_64);
980 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
981 I40E_GLPRT_PTC127L(hw->port),
982 pf->stat_offsets_loaded,
983 &osd->tx_size_127, &nsd->tx_size_127);
984 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
985 I40E_GLPRT_PTC255L(hw->port),
986 pf->stat_offsets_loaded,
987 &osd->tx_size_255, &nsd->tx_size_255);
988 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
989 I40E_GLPRT_PTC511L(hw->port),
990 pf->stat_offsets_loaded,
991 &osd->tx_size_511, &nsd->tx_size_511);
992 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
993 I40E_GLPRT_PTC1023L(hw->port),
994 pf->stat_offsets_loaded,
995 &osd->tx_size_1023, &nsd->tx_size_1023);
996 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
997 I40E_GLPRT_PTC1522L(hw->port),
998 pf->stat_offsets_loaded,
999 &osd->tx_size_1522, &nsd->tx_size_1522);
1000 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1001 I40E_GLPRT_PTC9522L(hw->port),
1002 pf->stat_offsets_loaded,
1003 &osd->tx_size_big, &nsd->tx_size_big);
1005 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1006 pf->stat_offsets_loaded,
1007 &osd->rx_undersize, &nsd->rx_undersize);
1008 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1009 pf->stat_offsets_loaded,
1010 &osd->rx_fragments, &nsd->rx_fragments);
1011 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1012 pf->stat_offsets_loaded,
1013 &osd->rx_oversize, &nsd->rx_oversize);
1014 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1015 pf->stat_offsets_loaded,
1016 &osd->rx_jabber, &nsd->rx_jabber);
1019 i40e_stat_update32(hw,
1020 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1021 pf->stat_offsets_loaded,
1022 &osd->fd_atr_match, &nsd->fd_atr_match);
1023 i40e_stat_update32(hw,
1024 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1025 pf->stat_offsets_loaded,
1026 &osd->fd_sb_match, &nsd->fd_sb_match);
1027 i40e_stat_update32(hw,
1028 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1029 pf->stat_offsets_loaded,
1030 &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1032 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1033 nsd->tx_lpi_status =
1034 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1035 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1036 nsd->rx_lpi_status =
1037 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1038 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1039 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1040 pf->stat_offsets_loaded,
1041 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1042 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1043 pf->stat_offsets_loaded,
1044 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1046 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1047 !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED))
1048 nsd->fd_sb_status = true;
1050 nsd->fd_sb_status = false;
1052 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1053 !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
1054 nsd->fd_atr_status = true;
1056 nsd->fd_atr_status = false;
1058 pf->stat_offsets_loaded = true;
1062 * i40e_update_stats - Update the various statistics counters.
1063 * @vsi: the VSI to be updated
1065 * Update the various stats for this VSI and its related entities.
1067 void i40e_update_stats(struct i40e_vsi *vsi)
1069 struct i40e_pf *pf = vsi->back;
1071 if (vsi == pf->vsi[pf->lan_vsi])
1072 i40e_update_pf_stats(pf);
1074 i40e_update_vsi_stats(vsi);
1078 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1079 * @vsi: the VSI to be searched
1080 * @macaddr: the MAC address
1083 * Returns ptr to the filter object or NULL
1085 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1086 const u8 *macaddr, s16 vlan)
1088 struct i40e_mac_filter *f;
1091 if (!vsi || !macaddr)
1094 key = i40e_addr_to_hkey(macaddr);
1095 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1096 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1104 * i40e_find_mac - Find a mac addr in the macvlan filters list
1105 * @vsi: the VSI to be searched
1106 * @macaddr: the MAC address we are searching for
1108 * Returns the first filter with the provided MAC address or NULL if
1109 * MAC address was not found
1111 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1113 struct i40e_mac_filter *f;
1116 if (!vsi || !macaddr)
1119 key = i40e_addr_to_hkey(macaddr);
1120 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1121 if ((ether_addr_equal(macaddr, f->macaddr)))
1128 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1129 * @vsi: the VSI to be searched
1131 * Returns true if VSI is in vlan mode or false otherwise
1133 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1135 /* If we have a PVID, always operate in VLAN mode */
1139 /* We need to operate in VLAN mode whenever we have any filters with
1140 * a VLAN other than I40E_VLAN_ANY. We could check the table each
1141 * time, incurring search cost repeatedly. However, we can notice two
1144 * 1) the only place where we can gain a VLAN filter is in i40e_add_filter.
1147 * 2) the only place where filters are actually removed is in
1148 * i40e_sync_filters_subtask.
1150 * Thus, we can simply use a boolean value, has_vlan_filters which we
1151 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1152 * we have to perform the full search after deleting filters in
1153 * i40e_sync_filters_subtask, but we already have to search
1154 * filters here and can perform the check at the same time. This
1155 * results in avoiding embedding a loop for VLAN mode inside another
1156 * loop over all the filters, and should maintain correctness as noted
1159 return vsi->has_vlan_filter;
1163 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1164 * @vsi: the VSI to configure
1165 * @tmp_add_list: list of filters ready to be added
1166 * @tmp_del_list: list of filters ready to be deleted
1167 * @vlan_filters: the number of active VLAN filters
1169 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1170 * behave as expected. If we have any active VLAN filters remaining or about
1171 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1172 * so that they only match against untagged traffic. If we no longer have any
1173 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1174 * so that they match against both tagged and untagged traffic. In this way,
1175 * we ensure that we correctly receive the desired traffic. This ensures that
1176 * when we have an active VLAN we will receive only untagged traffic and
1177 * traffic matching active VLANs. If we have no active VLANs then we will
1178 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1180 * Finally, in a similar fashion, this function also corrects filters when
1181 * there is an active PVID assigned to this VSI.
1183 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1185 * This function is only expected to be called from within
1186 * i40e_sync_vsi_filters.
1188 * NOTE: This function expects to be called while under the
1189 * mac_filter_hash_lock
1191 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1192 struct hlist_head *tmp_add_list,
1193 struct hlist_head *tmp_del_list,
1196 s16 pvid = le16_to_cpu(vsi->info.pvid);
1197 struct i40e_mac_filter *f, *add_head;
1198 struct i40e_new_mac_filter *new;
1199 struct hlist_node *h;
1202 /* To determine if a particular filter needs to be replaced we
1203 * have the three following conditions:
1205 * a) if we have a PVID assigned, then all filters which are
1206 * not marked as VLAN=PVID must be replaced with filters that are marked as VLAN=PVID
1208 * b) otherwise, if we have any active VLANS, all filters
1209 * which are marked as VLAN=-1 must be replaced with
1210 * filters marked as VLAN=0
1211 * c) finally, if we do not have any active VLANS, all filters
1212 * which are marked as VLAN=0 must be replaced with filters marked as VLAN=-1
1216 /* Update the filters about to be added in place */
1217 hlist_for_each_entry(new, tmp_add_list, hlist) {
1218 if (pvid && new->f->vlan != pvid)
1219 new->f->vlan = pvid;
1220 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1222 else if (!vlan_filters && new->f->vlan == 0)
1223 new->f->vlan = I40E_VLAN_ANY;
1226 /* Update the remaining active filters */
1227 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1228 /* Combine the checks for whether a filter needs to be changed
1229 * and then determine the new VLAN inside the if block, in
1230 * order to avoid duplicating code for adding the new filter
1231 * then deleting the old filter.
1233 if ((pvid && f->vlan != pvid) ||
1234 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1235 (!vlan_filters && f->vlan == 0)) {
1236 /* Determine the new vlan we will be adding */
1239 else if (vlan_filters)
1242 new_vlan = I40E_VLAN_ANY;
1244 /* Create the new filter */
1245 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1249 /* Create a temporary i40e_new_mac_filter */
1250 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1255 new->state = add_head->state;
1257 /* Add the new filter to the tmp list */
1258 hlist_add_head(&new->hlist, tmp_add_list);
1260 /* Put the original filter into the delete list */
1261 f->state = I40E_FILTER_REMOVE;
1262 hash_del(&f->hlist);
1263 hlist_add_head(&f->hlist, tmp_del_list);
1267 vsi->has_vlan_filter = !!vlan_filters;
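/* Example of case (a) above, with illustrative values: if PVID 100 is
 * assigned, an existing filter for some MAC with vlan == I40E_VLAN_ANY is
 * moved to tmp_del_list and a replacement filter for the same MAC with
 * vlan == 100 is queued on tmp_add_list.
 */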
1273 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1274 * @vsi: the PF Main VSI - inappropriate for any other VSI
1275 * @macaddr: the MAC address
1277 * Remove whatever filter the firmware set up so the driver can manage
1278 * its own filtering intelligently.
1280 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1282 struct i40e_aqc_remove_macvlan_element_data element;
1283 struct i40e_pf *pf = vsi->back;
1285 /* Only appropriate for the PF main VSI */
1286 if (vsi->type != I40E_VSI_MAIN)
1289 memset(&element, 0, sizeof(element));
1290 ether_addr_copy(element.mac_addr, macaddr);
1291 element.vlan_tag = 0;
1292 /* Ignore error returns, some firmware does it this way... */
1293 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1294 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1296 memset(&element, 0, sizeof(element));
1297 ether_addr_copy(element.mac_addr, macaddr);
1298 element.vlan_tag = 0;
1299 /* ...and some firmware does it this way. */
1300 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1301 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1302 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1306 * i40e_add_filter - Add a mac/vlan filter to the VSI
1307 * @vsi: the VSI to be searched
1308 * @macaddr: the MAC address
1311 * Returns ptr to the filter object or NULL when no memory available.
1313 * NOTE: This function is expected to be called with mac_filter_hash_lock
1316 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1317 const u8 *macaddr, s16 vlan)
1319 struct i40e_mac_filter *f;
1322 if (!vsi || !macaddr)
1325 f = i40e_find_filter(vsi, macaddr, vlan);
1327 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1331 /* Update the boolean indicating if we need to function in
1335 vsi->has_vlan_filter = true;
1337 ether_addr_copy(f->macaddr, macaddr);
1339 /* If we're in overflow promisc mode, set the state directly
1340 * to failed, so we don't bother to try sending the filter
1343 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
1344 f->state = I40E_FILTER_FAILED;
1346 f->state = I40E_FILTER_NEW;
1347 INIT_HLIST_NODE(&f->hlist);
1349 key = i40e_addr_to_hkey(macaddr);
1350 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1352 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1353 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1356 /* If we're asked to add a filter that has been marked for removal, it
1357 * is safe to simply restore it to active state. __i40e_del_filter
1358 * will have simply deleted any filters which were previously marked
1359 * NEW or FAILED, so if it is currently marked REMOVE it must have
1360 * previously been ACTIVE. Since we haven't yet run the sync filters
1361 * task, just restore this filter to the ACTIVE state so that the
1362 * sync task leaves it in place
1364 if (f->state == I40E_FILTER_REMOVE)
1365 f->state = I40E_FILTER_ACTIVE;
1371 * __i40e_del_filter - Remove a specific filter from the VSI
1372 * @vsi: VSI to remove from
1373 * @f: the filter to remove from the list
1375 * This function should be called instead of i40e_del_filter only if you know
1376 * the exact filter you will remove already, such as via i40e_find_filter or
1379 * NOTE: This function is expected to be called with mac_filter_hash_lock
1381 * ANOTHER NOTE: This function MUST be called from within the context of
1382 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1383 * instead of list_for_each_entry().
1385 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1390 /* If the filter was never added to firmware then we can just delete it
1391 * directly and we don't want to set the status to remove or else an
1392 * admin queue command will unnecessarily fire.
1394 if ((f->state == I40E_FILTER_FAILED) ||
1395 (f->state == I40E_FILTER_NEW)) {
1396 hash_del(&f->hlist);
1399 f->state = I40E_FILTER_REMOVE;
1402 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1403 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
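/* Filter life cycle summary: i40e_add_filter() creates entries in the NEW
 * state (or FAILED while in overflow promiscuous mode) and revives REMOVE
 * entries back to ACTIVE; __i40e_del_filter() drops NEW and FAILED entries
 * immediately and marks the rest REMOVE so that the filter sync task can
 * issue the matching admin queue delete.
 */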
1407 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1408 * @vsi: the VSI to be searched
1409 * @macaddr: the MAC address
1412 * NOTE: This function is expected to be called with mac_filter_hash_lock
1414 * ANOTHER NOTE: This function MUST be called from within the context of
1415 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1416 * instead of list_for_each_entry().
1418 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1420 struct i40e_mac_filter *f;
1422 if (!vsi || !macaddr)
1425 f = i40e_find_filter(vsi, macaddr, vlan);
1426 __i40e_del_filter(vsi, f);
1430 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1431 * @vsi: the VSI to be searched
1432 * @macaddr: the mac address to be filtered
1434 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1435 * go through all the macvlan filters and add a macvlan filter for each
1436 * unique vlan that already exists. If a PVID has been assigned, instead only
1437 * add the macaddr to that VLAN.
1439 * Returns last filter added on success, else NULL
1441 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1444 struct i40e_mac_filter *f, *add = NULL;
1445 struct hlist_node *h;
1449 return i40e_add_filter(vsi, macaddr,
1450 le16_to_cpu(vsi->info.pvid));
1452 if (!i40e_is_vsi_in_vlan(vsi))
1453 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1455 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1456 if (f->state == I40E_FILTER_REMOVE)
1458 add = i40e_add_filter(vsi, macaddr, f->vlan);
1467 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1468 * @vsi: the VSI to be searched
1469 * @macaddr: the mac address to be removed
1471 * Removes a given MAC address from a VSI regardless of what VLAN it has been associated with.
1474 * Returns 0 for success, or error
1476 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1478 struct i40e_mac_filter *f;
1479 struct hlist_node *h;
1483 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1484 "Missing mac_filter_hash_lock\n");
1485 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1486 if (ether_addr_equal(macaddr, f->macaddr)) {
1487 __i40e_del_filter(vsi, f);
1499 * i40e_set_mac - NDO callback to set mac address
1500 * @netdev: network interface device structure
1501 * @p: pointer to an address structure
1503 * Returns 0 on success, negative on failure
1505 static int i40e_set_mac(struct net_device *netdev, void *p)
1507 struct i40e_netdev_priv *np = netdev_priv(netdev);
1508 struct i40e_vsi *vsi = np->vsi;
1509 struct i40e_pf *pf = vsi->back;
1510 struct i40e_hw *hw = &pf->hw;
1511 struct sockaddr *addr = p;
1513 if (!is_valid_ether_addr(addr->sa_data))
1514 return -EADDRNOTAVAIL;
1516 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1517 netdev_info(netdev, "already using mac address %pM\n",
1522 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1523 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1524 return -EADDRNOTAVAIL;
1526 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1527 netdev_info(netdev, "returning to hw mac address %pM\n",
1530 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1532 spin_lock_bh(&vsi->mac_filter_hash_lock);
1533 i40e_del_mac_filter(vsi, netdev->dev_addr);
1534 i40e_add_mac_filter(vsi, addr->sa_data);
1535 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1536 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1537 if (vsi->type == I40E_VSI_MAIN) {
1540 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1541 I40E_AQC_WRITE_TYPE_LAA_WOL,
1542 addr->sa_data, NULL);
1544 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1545 i40e_stat_str(hw, ret),
1546 i40e_aq_str(hw, hw->aq.asq_last_status));
1549 /* schedule our worker thread which will take care of
1550 * applying the new filter changes
1552 i40e_service_event_schedule(vsi->back);
1557 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1558 * @vsi: the VSI being setup
1559 * @ctxt: VSI context structure
1560 * @enabled_tc: Enabled TCs bitmap
1561 * @is_add: True if called before Add VSI
1563 * Setup VSI queue mapping for enabled traffic classes.
1565 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1566 struct i40e_vsi_context *ctxt,
1570 struct i40e_pf *pf = vsi->back;
1580 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1583 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1584 /* Find numtc from enabled TC bitmap */
1585 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1586 if (enabled_tc & BIT(i)) /* TC is enabled */
1590 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1594 /* At least TC0 is enabled in case of non-DCB case */
1598 vsi->tc_config.numtc = numtc;
1599 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1600 /* Number of queues per enabled TC */
1601 qcount = vsi->alloc_queue_pairs;
1603 num_tc_qps = qcount / numtc;
1604 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1606 /* Setup queue offset/count for all TCs for given VSI */
1607 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1608 /* See if the given TC is enabled for the given VSI */
1609 if (vsi->tc_config.enabled_tc & BIT(i)) {
1613 switch (vsi->type) {
1615 qcount = min_t(int, pf->alloc_rss_size,
1619 case I40E_VSI_SRIOV:
1620 case I40E_VSI_VMDQ2:
1622 qcount = num_tc_qps;
1626 vsi->tc_config.tc_info[i].qoffset = offset;
1627 vsi->tc_config.tc_info[i].qcount = qcount;
1629 /* find the next higher power-of-2 of num queue pairs */
1632 while (num_qps && (BIT_ULL(pow) < qcount)) {
1637 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1639 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1640 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1644 /* TC is not enabled so set the offset to
1645 * default queue and allocate one queue for the given TC.
1648 vsi->tc_config.tc_info[i].qoffset = 0;
1649 vsi->tc_config.tc_info[i].qcount = 1;
1650 vsi->tc_config.tc_info[i].netdev_tc = 0;
1654 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1657 /* Set actual Tx/Rx queue pairs */
1658 vsi->num_queue_pairs = offset;
1659 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1660 if (vsi->req_queue_pairs > 0)
1661 vsi->num_queue_pairs = vsi->req_queue_pairs;
1662 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1663 vsi->num_queue_pairs = pf->num_lan_msix;
1666 /* Scheduler section valid can only be set for ADD VSI */
1668 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1670 ctxt->info.up_enable_bits = enabled_tc;
1672 if (vsi->type == I40E_VSI_SRIOV) {
1673 ctxt->info.mapping_flags |=
1674 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1675 for (i = 0; i < vsi->num_queue_pairs; i++)
1676 ctxt->info.queue_mapping[i] =
1677 cpu_to_le16(vsi->base_queue + i);
1679 ctxt->info.mapping_flags |=
1680 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1681 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1683 ctxt->info.valid_sections |= cpu_to_le16(sections);
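/* Worked example of the tc_mapping encoding above (illustrative values):
 * a TC given qcount = 4 queue pairs at offset = 8 ends the power-of-2 loop
 * with pow = 2 (BIT_ULL(2) == 4 == qcount), so
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 * i.e. the queue count is stored as a power-of-2 exponent, not a raw count.
 */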
1687 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1688 * @netdev: the netdevice
1689 * @addr: address to add
1691 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1692 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1694 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1696 struct i40e_netdev_priv *np = netdev_priv(netdev);
1697 struct i40e_vsi *vsi = np->vsi;
1699 if (i40e_add_mac_filter(vsi, addr))
1706 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1707 * @netdev: the netdevice
1708 * @addr: address to add
1710 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1711 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1713 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1715 struct i40e_netdev_priv *np = netdev_priv(netdev);
1716 struct i40e_vsi *vsi = np->vsi;
1718 i40e_del_mac_filter(vsi, addr);
1724 * i40e_set_rx_mode - NDO callback to set the netdev filters
1725 * @netdev: network interface device structure
1727 static void i40e_set_rx_mode(struct net_device *netdev)
1729 struct i40e_netdev_priv *np = netdev_priv(netdev);
1730 struct i40e_vsi *vsi = np->vsi;
1732 spin_lock_bh(&vsi->mac_filter_hash_lock);
1734 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1735 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1737 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1739 /* check for other flag changes */
1740 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1741 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1742 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1745 /* schedule our worker thread which will take care of
1746 * applying the new filter changes
1748 i40e_service_event_schedule(vsi->back);
1752 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1753 * @vsi: Pointer to VSI struct
1754 * @from: Pointer to list which contains MAC filter entries - changes to
1755 * those entries needs to be undone.
1757 * MAC filter entries from this list were slated for deletion.
1759 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1760 struct hlist_head *from)
1762 struct i40e_mac_filter *f;
1763 struct hlist_node *h;
1765 hlist_for_each_entry_safe(f, h, from, hlist) {
1766 u64 key = i40e_addr_to_hkey(f->macaddr);
1768 /* Move the element back into MAC filter list*/
1769 hlist_del(&f->hlist);
1770 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1775 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1776 * @vsi: Pointer to vsi struct
1777 * @from: Pointer to list which contains MAC filter entries - changes to
1778 * those entries needs to be undone.
1780 * MAC filter entries from this list were slated for addition.
1782 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1783 struct hlist_head *from)
1785 struct i40e_new_mac_filter *new;
1786 struct hlist_node *h;
1788 hlist_for_each_entry_safe(new, h, from, hlist) {
1789 /* We can simply free the wrapper structure */
1790 hlist_del(&new->hlist);
1796 * i40e_next_filter - Get the next non-broadcast filter from a list
1797 * @next: pointer to filter in list
1799 * Returns the next non-broadcast filter in the list. Required so that we
1800 * ignore broadcast filters within the list, since these are not handled via
1801 * the normal firmware update path.
1804 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
1806 hlist_for_each_entry_continue(next, hlist) {
1807 if (!is_broadcast_ether_addr(next->f->macaddr))
1815 * i40e_update_filter_state - Update filter state based on return data
1817 * @count: Number of filters added
1818 * @add_list: return data from fw
1819 * @add_head: pointer to the first filter in the current batch
1821 * MAC filter entries from list were slated to be added to device. Returns
1822 * number of successful filters. Note that 0 does NOT mean success!
1825 i40e_update_filter_state(int count,
1826 struct i40e_aqc_add_macvlan_element_data *add_list,
1827 struct i40e_new_mac_filter *add_head)
1832 for (i = 0; i < count; i++) {
1833 /* Always check the status of each filter. We don't need to check
1834 * the firmware return status because we pre-set each element's
1835 * match_method to I40E_AQC_MM_ERR_NO_RES when sending the filter
1836 * request to the adminq. Thus, if the field no longer matches that
1837 * value, we know the firmware accepted the filter and it is active.
1839 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
1840 add_head->state = I40E_FILTER_FAILED;
1842 add_head->state = I40E_FILTER_ACTIVE;
1846 add_head = i40e_next_filter(add_head);
1855 * i40e_aqc_del_filters - Request firmware to delete a set of filters
1856 * @vsi: ptr to the VSI
1857 * @vsi_name: name to display in messages
1858 * @list: the list of filters to send to firmware
1859 * @num_del: the number of filters to delete
1860 * @retval: Set to -EIO on failure to delete
1862 * Send a request to firmware via AdminQ to delete a set of filters. Uses
1863 * *retval instead of a return value so that a successful delete does not force
1864 * *retval back to 0. This ensures that a sequence of calls to this function
1865 * preserves the previous value of *retval across successful deletes.
1868 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
1869 struct i40e_aqc_remove_macvlan_element_data *list,
1870 int num_del, int *retval)
1872 struct i40e_hw *hw = &vsi->back->hw;
1876 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
1877 aq_err = hw->aq.asq_last_status;
1879 /* Explicitly ignore and do not report when firmware returns ENOENT */
1880 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1882 dev_info(&vsi->back->pdev->dev,
1883 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
1884 vsi_name, i40e_stat_str(hw, aq_ret),
1885 i40e_aq_str(hw, aq_err));
1890 * i40e_aqc_add_filters - Request firmware to add a set of filters
1891 * @vsi: ptr to the VSI
1892 * @vsi_name: name to display in messages
1893 * @list: the list of filters to send to firmware
1894 * @add_head: Position in the add hlist
1895 * @num_add: the number of filters to add
1896 * @promisc_changed: set to true on exit if promiscuous mode was forced on
1898 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
1899 * *promisc_changed to true if the firmware has run out of space for more filters.
1903 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
1904 struct i40e_aqc_add_macvlan_element_data *list,
1905 struct i40e_new_mac_filter *add_head,
1906 int num_add, bool *promisc_changed)
1908 struct i40e_hw *hw = &vsi->back->hw;
1911 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
1912 aq_err = hw->aq.asq_last_status;
1913 fcnt = i40e_update_filter_state(num_add, list, add_head);
1915 if (fcnt != num_add) {
1916 *promisc_changed = true;
1917 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
1918 dev_warn(&vsi->back->pdev->dev,
1919 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
1920 i40e_aq_str(hw, aq_err),
1926 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
1927 * @vsi: pointer to the VSI
1930 * This function sets or clears the promiscuous broadcast flags for VLAN
1931 * filters in order to properly receive broadcast frames. Assumes that only
1932 * broadcast filters are passed.
1934 * Returns status indicating success or failure.
1937 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
1938 struct i40e_mac_filter *f)
1940 bool enable = f->state == I40E_FILTER_NEW;
1941 struct i40e_hw *hw = &vsi->back->hw;
1944 if (f->vlan == I40E_VLAN_ANY) {
1945 aq_ret = i40e_aq_set_vsi_broadcast(hw,
1950 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
1958 dev_warn(&vsi->back->pdev->dev,
1959 "Error %s setting broadcast promiscuous mode on %s\n",
1960 i40e_aq_str(hw, hw->aq.asq_last_status),
1967 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1968 * @vsi: ptr to the VSI
1970 * Push any outstanding VSI filter changes through the AdminQ.
1972 * Returns 0 or error value
1974 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1976 struct hlist_head tmp_add_list, tmp_del_list;
1977 struct i40e_mac_filter *f;
1978 struct i40e_new_mac_filter *new, *add_head = NULL;
1979 struct i40e_hw *hw = &vsi->back->hw;
1980 unsigned int failed_filters = 0;
1981 unsigned int vlan_filters = 0;
1982 bool promisc_changed = false;
1983 char vsi_name[16] = "PF";
1984 int filter_list_len = 0;
1985 i40e_status aq_ret = 0;
1986 u32 changed_flags = 0;
1987 struct hlist_node *h;
1996 /* empty array typed pointers, kzalloc'd later */
1997 struct i40e_aqc_add_macvlan_element_data *add_list;
1998 struct i40e_aqc_remove_macvlan_element_data *del_list;
2000 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
2001 usleep_range(1000, 2000);
2005 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2006 vsi->current_netdev_flags = vsi->netdev->flags;
2009 INIT_HLIST_HEAD(&tmp_add_list);
2010 INIT_HLIST_HEAD(&tmp_del_list);
2012 if (vsi->type == I40E_VSI_SRIOV)
2013 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2014 else if (vsi->type != I40E_VSI_MAIN)
2015 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2017 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2018 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2020 spin_lock_bh(&vsi->mac_filter_hash_lock);
2021 /* Create a list of filters to delete. */
2022 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2023 if (f->state == I40E_FILTER_REMOVE) {
2024 /* Move the element into temporary del_list */
2025 hash_del(&f->hlist);
2026 hlist_add_head(&f->hlist, &tmp_del_list);
2028 /* Avoid counting removed filters */
2031 if (f->state == I40E_FILTER_NEW) {
2032 /* Create a temporary i40e_new_mac_filter */
2033 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2035 goto err_no_memory_locked;
2037 /* Store pointer to the real filter */
2039 new->state = f->state;
2041 /* Add it to the hash list */
2042 hlist_add_head(&new->hlist, &tmp_add_list);
2045 /* Count the number of active (current and new) VLAN
2046 * filters we have now. Does not count filters which
2047 * are marked for deletion.
2053 retval = i40e_correct_mac_vlan_filters(vsi,
2058 goto err_no_memory_locked;
2060 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2063 /* Now process 'del_list' outside the lock */
2064 if (!hlist_empty(&tmp_del_list)) {
2065 filter_list_len = hw->aq.asq_buf_size /
2066 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2067 list_size = filter_list_len *
2068 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2069 del_list = kzalloc(list_size, GFP_ATOMIC);
2073 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2076 /* handle broadcast filters by updating the broadcast
2077 * promiscuous flag and release filter list.
2079 if (is_broadcast_ether_addr(f->macaddr)) {
2080 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2082 hlist_del(&f->hlist);
2087 /* add to delete list */
2088 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2089 if (f->vlan == I40E_VLAN_ANY) {
2090 del_list[num_del].vlan_tag = 0;
2091 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2093 del_list[num_del].vlan_tag =
2094 cpu_to_le16((u16)(f->vlan));
2097 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2098 del_list[num_del].flags = cmd_flags;
2101 /* flush a full buffer */
2102 if (num_del == filter_list_len) {
2103 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2105 memset(del_list, 0, list_size);
2108 /* Release memory for MAC filter entries which were
2109 * synced up with HW.
2111 hlist_del(&f->hlist);
2116 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2124 if (!hlist_empty(&tmp_add_list)) {
2125 /* Do all the adds now. */
2126 filter_list_len = hw->aq.asq_buf_size /
2127 sizeof(struct i40e_aqc_add_macvlan_element_data);
2128 list_size = filter_list_len *
2129 sizeof(struct i40e_aqc_add_macvlan_element_data);
2130 add_list = kzalloc(list_size, GFP_ATOMIC);
2135 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2136 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2138 new->state = I40E_FILTER_FAILED;
2142 /* handle broadcast filters by updating the broadcast
2143 * promiscuous flag instead of adding a MAC filter.
2145 if (is_broadcast_ether_addr(new->f->macaddr)) {
2146 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2148 new->state = I40E_FILTER_FAILED;
2150 new->state = I40E_FILTER_ACTIVE;
2154 /* add to add array */
2158 ether_addr_copy(add_list[num_add].mac_addr,
2160 if (new->f->vlan == I40E_VLAN_ANY) {
2161 add_list[num_add].vlan_tag = 0;
2162 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2164 add_list[num_add].vlan_tag =
2165 cpu_to_le16((u16)(new->f->vlan));
2167 add_list[num_add].queue_number = 0;
2168 /* set invalid match method for later detection */
2169 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2170 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2171 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2174 /* flush a full buffer */
2175 if (num_add == filter_list_len) {
2176 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2179 memset(add_list, 0, list_size);
2184 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2185 num_add, &promisc_changed);
2187 /* Now move all of the filters from the temp add list back to
2190 spin_lock_bh(&vsi->mac_filter_hash_lock);
2191 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2192 /* Only update the state if we're still NEW */
2193 if (new->f->state == I40E_FILTER_NEW)
2194 new->f->state = new->state;
2195 hlist_del(&new->hlist);
2198 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2203 /* Determine the number of active and failed filters. */
2204 spin_lock_bh(&vsi->mac_filter_hash_lock);
2205 vsi->active_filters = 0;
2206 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2207 if (f->state == I40E_FILTER_ACTIVE)
2208 vsi->active_filters++;
2209 else if (f->state == I40E_FILTER_FAILED)
2212 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2214 /* If promiscuous mode has changed, we need to calculate a new
2215 * threshold for when we are safe to exit
2217 if (promisc_changed)
2218 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2220 /* Check if we are able to exit overflow promiscuous mode. We can
2221 * safely exit if we didn't just enter, we no longer have any failed
2222 * filters, and we have reduced filters below the threshold value.
2224 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
2225 !promisc_changed && !failed_filters &&
2226 (vsi->active_filters < vsi->promisc_threshold)) {
2227 dev_info(&pf->pdev->dev,
2228 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2230 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2231 promisc_changed = true;
2232 vsi->promisc_threshold = 0;
2235 /* if the VF is not trusted do not do promisc */
2236 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2237 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2241 /* check for changes in promiscuous modes */
2242 if (changed_flags & IFF_ALLMULTI) {
2243 bool cur_multipromisc;
2245 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2246 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2251 retval = i40e_aq_rc_to_posix(aq_ret,
2252 hw->aq.asq_last_status);
2253 dev_info(&pf->pdev->dev,
2254 "set multi promisc failed on %s, err %s aq_err %s\n",
2256 i40e_stat_str(hw, aq_ret),
2257 i40e_aq_str(hw, hw->aq.asq_last_status));
2260 if ((changed_flags & IFF_PROMISC) ||
2262 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
2265 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2266 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2268 if ((vsi->type == I40E_VSI_MAIN) &&
2269 (pf->lan_veb != I40E_NO_VEB) &&
2270 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2271 /* set defport ON for Main VSI instead of true promisc
2272 * this way we will get all unicast/multicast and VLAN
2273 * promisc behavior but will not get VF or VMDq traffic
2274 * replicated on the Main VSI.
2276 if (pf->cur_promisc != cur_promisc) {
2277 pf->cur_promisc = cur_promisc;
2280 i40e_aq_set_default_vsi(hw,
2285 i40e_aq_clear_default_vsi(hw,
2289 retval = i40e_aq_rc_to_posix(aq_ret,
2290 hw->aq.asq_last_status);
2291 dev_info(&pf->pdev->dev,
2292 "Set default VSI failed on %s, err %s, aq_err %s\n",
2294 i40e_stat_str(hw, aq_ret),
2296 hw->aq.asq_last_status));
2300 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2307 i40e_aq_rc_to_posix(aq_ret,
2308 hw->aq.asq_last_status);
2309 dev_info(&pf->pdev->dev,
2310 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2312 i40e_stat_str(hw, aq_ret),
2314 hw->aq.asq_last_status));
2316 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2322 i40e_aq_rc_to_posix(aq_ret,
2323 hw->aq.asq_last_status);
2324 dev_info(&pf->pdev->dev,
2325 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2327 i40e_stat_str(hw, aq_ret),
2329 hw->aq.asq_last_status));
2332 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2336 retval = i40e_aq_rc_to_posix(aq_ret,
2337 pf->hw.aq.asq_last_status);
2338 dev_info(&pf->pdev->dev,
2339 "set brdcast promisc failed, err %s, aq_err %s\n",
2340 i40e_stat_str(hw, aq_ret),
2342 hw->aq.asq_last_status));
2346 /* if something went wrong then set the changed flag so we try again */
2348 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2350 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2354 /* Restore elements on the temporary add and delete lists */
2355 spin_lock_bh(&vsi->mac_filter_hash_lock);
2356 err_no_memory_locked:
2357 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2358 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2359 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2361 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2362 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
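/* Illustrative sketch (not part of the driver): how i40e_sync_vsi_filters()
 * sizes its AdminQ chunks. A single request buffer holds asq_buf_size bytes,
 * so at most that many whole elements fit per firmware call; when the buffer
 * fills up, the loop flushes it and starts a fresh chunk. Hypothetical name.
 */
static inline int i40e_example_add_chunk_len(u16 asq_buf_size)
{
	return asq_buf_size /
	       sizeof(struct i40e_aqc_add_macvlan_element_data);
}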
2367 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2368 * @pf: board private structure
2370 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2374 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2376 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2378 for (v = 0; v < pf->num_alloc_vsi; v++) {
2380 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2381 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2384 /* come back and try again later */
2385 pf->flags |= I40E_FLAG_FILTER_SYNC;
2393 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2394 * @netdev: network interface device structure
2395 * @new_mtu: new value for maximum frame size
2397 * Returns 0 on success, negative on failure
2399 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2401 struct i40e_netdev_priv *np = netdev_priv(netdev);
2402 struct i40e_vsi *vsi = np->vsi;
2403 struct i40e_pf *pf = vsi->back;
2405 netdev_info(netdev, "changing MTU from %d to %d\n",
2406 netdev->mtu, new_mtu);
2407 netdev->mtu = new_mtu;
2408 if (netif_running(netdev))
2409 i40e_vsi_reinit_locked(vsi);
2410 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2411 I40E_FLAG_CLIENT_L2_CHANGE);
2416 * i40e_ioctl - Access the hwtstamp interface
2417 * @netdev: network interface device structure
2418 * @ifr: interface request data
2419 * @cmd: ioctl command
2421 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2423 struct i40e_netdev_priv *np = netdev_priv(netdev);
2424 struct i40e_pf *pf = np->vsi->back;
2428 return i40e_ptp_get_ts_config(pf, ifr);
2430 return i40e_ptp_set_ts_config(pf, ifr);
2437 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2438 * @vsi: the vsi being adjusted
2440 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2442 struct i40e_vsi_context ctxt;
2445 if ((vsi->info.valid_sections &
2446 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2447 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2448 return; /* already enabled */
2450 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2451 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2452 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2454 ctxt.seid = vsi->seid;
2455 ctxt.info = vsi->info;
2456 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2458 dev_info(&vsi->back->pdev->dev,
2459 "update vlan stripping failed, err %s aq_err %s\n",
2460 i40e_stat_str(&vsi->back->hw, ret),
2461 i40e_aq_str(&vsi->back->hw,
2462 vsi->back->hw.aq.asq_last_status));
2467 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2468 * @vsi: the vsi being adjusted
2470 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2472 struct i40e_vsi_context ctxt;
2475 if ((vsi->info.valid_sections &
2476 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2477 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2478 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2479 return; /* already disabled */
2481 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2482 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2483 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2485 ctxt.seid = vsi->seid;
2486 ctxt.info = vsi->info;
2487 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2489 dev_info(&vsi->back->pdev->dev,
2490 "update vlan stripping failed, err %s aq_err %s\n",
2491 i40e_stat_str(&vsi->back->hw, ret),
2492 i40e_aq_str(&vsi->back->hw,
2493 vsi->back->hw.aq.asq_last_status));
2498 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2499 * @netdev: network interface to be adjusted
2500 * @features: netdev features to test if VLAN offload is enabled or not
2502 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2504 struct i40e_netdev_priv *np = netdev_priv(netdev);
2505 struct i40e_vsi *vsi = np->vsi;
2507 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2508 i40e_vlan_stripping_enable(vsi);
2510 i40e_vlan_stripping_disable(vsi);
2514 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2515 * @vsi: the vsi being configured
2516 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2518 * This is a helper function for adding a new MAC/VLAN filter with the
2519 * specified VLAN for each existing MAC address already in the hash table.
2520 * This function does *not* perform any accounting to update filters based on
2523 * NOTE: this function expects to be called while under the
2524 * mac_filter_hash_lock
2526 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2528 struct i40e_mac_filter *f, *add_f;
2529 struct hlist_node *h;
2532 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2533 if (f->state == I40E_FILTER_REMOVE)
2535 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2537 dev_info(&vsi->back->pdev->dev,
2538 "Could not add vlan filter %d for %pM\n",
2548 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2549 * @vsi: the VSI being configured
2550 * @vid: VLAN id to be added
2552 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2556 if (!vid || vsi->info.pvid)
2559 /* Locked once because all functions invoked below iterate the list */
2560 spin_lock_bh(&vsi->mac_filter_hash_lock);
2561 err = i40e_add_vlan_all_mac(vsi, vid);
2562 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2566 /* schedule our worker thread which will take care of
2567 * applying the new filter changes
2569 i40e_service_event_schedule(vsi->back);
2574 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2575 * @vsi: the vsi being configured
2576 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2578 * This function should be used to remove all VLAN filters which match the
2579 * given VID. It does not schedule the service event and does not take the
2580 * mac_filter_hash_lock so it may be combined with other operations under
2581 * a single invocation of the mac_filter_hash_lock.
2583 * NOTE: this function expects to be called while under the
2584 * mac_filter_hash_lock
2586 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2588 struct i40e_mac_filter *f;
2589 struct hlist_node *h;
2592 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2594 __i40e_del_filter(vsi, f);
2599 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2600 * @vsi: the VSI being configured
2601 * @vid: VLAN id to be removed
2603 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2605 if (!vid || vsi->info.pvid)
2608 spin_lock_bh(&vsi->mac_filter_hash_lock);
2609 i40e_rm_vlan_all_mac(vsi, vid);
2610 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2612 /* schedule our worker thread which will take care of
2613 * applying the new filter changes
2615 i40e_service_event_schedule(vsi->back);
2619 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2620 * @netdev: network interface to be adjusted
2621 * @vid: vlan id to be added
2623 * net_device_ops implementation for adding vlan ids
2625 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2626 __always_unused __be16 proto, u16 vid)
2628 struct i40e_netdev_priv *np = netdev_priv(netdev);
2629 struct i40e_vsi *vsi = np->vsi;
2632 if (vid >= VLAN_N_VID)
2635 /* If the network stack called us with vid = 0 then
2636 * it is asking to receive priority tagged packets with
2637 * vlan id 0. Our HW receives them by default when configured
2638 * to receive untagged packets so there is no need to add an
2639 * extra filter for vlan 0 tagged packets.
2642 ret = i40e_vsi_add_vlan(vsi, vid);
2645 set_bit(vid, vsi->active_vlans);
2651 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2652 * @netdev: network interface to be adjusted
2653 * @vid: vlan id to be removed
2655 * net_device_ops implementation for removing vlan ids
2657 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2658 __always_unused __be16 proto, u16 vid)
2660 struct i40e_netdev_priv *np = netdev_priv(netdev);
2661 struct i40e_vsi *vsi = np->vsi;
2663 /* return code is ignored as there is nothing a user
2664 * can do about failure to remove and a log message was
2665 * already printed from the other function
2667 i40e_vsi_kill_vlan(vsi, vid);
2669 clear_bit(vid, vsi->active_vlans);
2675 * i40e_macaddr_init - explicitly write the mac address filters
2677 * @vsi: pointer to the vsi
2678 * @macaddr: the MAC address
2680 * This is needed when the macaddr has been obtained by other
2681 * means than the default, e.g., from Open Firmware or IDPROM.
2682 * Returns 0 on success, negative on failure
2684 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2687 struct i40e_aqc_add_macvlan_element_data element;
2689 ret = i40e_aq_mac_address_write(&vsi->back->hw,
2690 I40E_AQC_WRITE_TYPE_LAA_WOL,
2693 dev_info(&vsi->back->pdev->dev,
2694 "Addr change for VSI failed: %d\n", ret);
2695 return -EADDRNOTAVAIL;
2698 memset(&element, 0, sizeof(element));
2699 ether_addr_copy(element.mac_addr, macaddr);
2700 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2701 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2703 dev_info(&vsi->back->pdev->dev,
2704 "add filter failed err %s aq_err %s\n",
2705 i40e_stat_str(&vsi->back->hw, ret),
2706 i40e_aq_str(&vsi->back->hw,
2707 vsi->back->hw.aq.asq_last_status));
2713 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2714 * @vsi: the vsi being brought back up
2716 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2723 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2725 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2726 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2731 * i40e_vsi_add_pvid - Add pvid for the VSI
2732 * @vsi: the vsi being adjusted
2733 * @vid: the vlan id to set as a PVID
2735 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2737 struct i40e_vsi_context ctxt;
2740 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2741 vsi->info.pvid = cpu_to_le16(vid);
2742 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2743 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2744 I40E_AQ_VSI_PVLAN_EMOD_STR;
2746 ctxt.seid = vsi->seid;
2747 ctxt.info = vsi->info;
2748 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2750 dev_info(&vsi->back->pdev->dev,
2751 "add pvid failed, err %s aq_err %s\n",
2752 i40e_stat_str(&vsi->back->hw, ret),
2753 i40e_aq_str(&vsi->back->hw,
2754 vsi->back->hw.aq.asq_last_status));
2762 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2763 * @vsi: the vsi being adjusted
2765 * Just use the vlan_rx_register() service to put it back to normal
2767 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2769 i40e_vlan_stripping_disable(vsi);
2775 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2776 * @vsi: ptr to the VSI
2778 * If this function returns with an error, then it's possible one or
2779 * more of the rings is populated (while the rest are not). It is the
2780 * caller's duty to clean those orphaned rings.
2782 * Return 0 on success, negative on failure
2784 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2788 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2789 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2795 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2796 * @vsi: ptr to the VSI
2798 * Free VSI's transmit software resources
2800 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2807 for (i = 0; i < vsi->num_queue_pairs; i++)
2808 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2809 i40e_free_tx_resources(vsi->tx_rings[i]);
2813 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2814 * @vsi: ptr to the VSI
2816 * If this function returns with an error, then it's possible one or
2817 * more of the rings is populated (while the rest are not). It is the
2818 * caller's duty to clean those orphaned rings.
2820 * Return 0 on success, negative on failure
2822 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2826 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2827 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2832 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2833 * @vsi: ptr to the VSI
2835 * Free all receive software resources
2837 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2844 for (i = 0; i < vsi->num_queue_pairs; i++)
2845 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2846 i40e_free_rx_resources(vsi->rx_rings[i]);
2850 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2851 * @ring: The Tx ring to configure
2853 * This enables/disables XPS for a given Tx descriptor ring
2854 * based on the TCs enabled for the VSI that ring belongs to.
2856 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2858 struct i40e_vsi *vsi = ring->vsi;
2861 if (!ring->q_vector || !ring->netdev)
2864 /* In single TC mode, enable XPS */
2865 if (vsi->tc_config.numtc <= 1) {
2866 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2867 netif_set_xps_queue(ring->netdev,
2868 &ring->q_vector->affinity_mask,
2870 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2871 /* Disable XPS to allow selection based on TC */
2872 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2873 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2874 free_cpumask_var(mask);
2884 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the ring setup
2885 * @ring: The Tx ring to configure
2887 * Configure the Tx descriptor ring in the HMC context.
2889 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2891 struct i40e_vsi *vsi = ring->vsi;
2892 u16 pf_q = vsi->base_queue + ring->queue_index;
2893 struct i40e_hw *hw = &vsi->back->hw;
2894 struct i40e_hmc_obj_txq tx_ctx;
2895 i40e_status err = 0;
2898 /* some ATR related tx ring init */
2899 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2900 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2901 ring->atr_count = 0;
2903 ring->atr_sample_rate = 0;
2907 i40e_config_xps_tx_ring(ring);
2909 /* clear the context structure first */
2910 memset(&tx_ctx, 0, sizeof(tx_ctx));
2912 tx_ctx.new_context = 1;
2913 tx_ctx.base = (ring->dma / 128);
2914 tx_ctx.qlen = ring->count;
2915 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2916 I40E_FLAG_FD_ATR_ENABLED));
2917 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2918 /* FDIR VSI tx ring can still use RS bit and writebacks */
2919 if (vsi->type != I40E_VSI_FDIR)
2920 tx_ctx.head_wb_ena = 1;
2921 tx_ctx.head_wb_addr = ring->dma +
2922 (ring->count * sizeof(struct i40e_tx_desc));
2924 /* As part of VSI creation/update, FW allocates certain
2925 * Tx arbitration queue sets for each TC enabled for
2926 * the VSI. The FW returns the handles to these queue
2927 * sets as part of the response buffer to Add VSI,
2928 * Update VSI, etc. AQ commands. It is expected that
2929 * these queue set handles be associated with the Tx
2930 * queues by the driver as part of the TX queue context
2931 * initialization. This has to be done regardless of
2932 * DCB as by default everything is mapped to TC0.
2934 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2935 tx_ctx.rdylist_act = 0;
2937 /* clear the context in the HMC */
2938 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2940 dev_info(&vsi->back->pdev->dev,
2941 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2942 ring->queue_index, pf_q, err);
2946 /* set the context in the HMC */
2947 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2949 dev_info(&vsi->back->pdev->dev,
2950 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2951 ring->queue_index, pf_q, err);
2955 /* Now associate this queue with this PCI function */
2956 if (vsi->type == I40E_VSI_VMDQ2) {
2957 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2958 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2959 I40E_QTX_CTL_VFVM_INDX_MASK;
2961 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2964 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2965 I40E_QTX_CTL_PF_INDX_MASK);
2966 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2969 /* cache the tail offset for easier writes later */
2970 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
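/* Illustrative sketch (not part of the driver): two address computations used
 * when filling the Tx HMC context above. The base address is stored in
 * 128-byte units, and the head write-back area sits immediately after the
 * last descriptor of the ring. Hypothetical names.
 */
static inline u64 i40e_example_hmc_base(dma_addr_t dma)
{
	return (u64)dma / 128;		/* HMC base is in 128-byte units */
}

static inline dma_addr_t i40e_example_head_wb_addr(dma_addr_t dma, u16 count)
{
	return dma + count * sizeof(struct i40e_tx_desc);
}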
2976 * i40e_configure_rx_ring - Configure a receive ring context
2977 * @ring: The Rx ring to configure
2979 * Configure the Rx descriptor ring in the HMC context.
2981 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2983 struct i40e_vsi *vsi = ring->vsi;
2984 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2985 u16 pf_q = vsi->base_queue + ring->queue_index;
2986 struct i40e_hw *hw = &vsi->back->hw;
2987 struct i40e_hmc_obj_rxq rx_ctx;
2988 i40e_status err = 0;
2992 /* clear the context structure first */
2993 memset(&rx_ctx, 0, sizeof(rx_ctx));
2995 ring->rx_buf_len = vsi->rx_buf_len;
2997 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
2998 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3000 rx_ctx.base = (ring->dma / 128);
3001 rx_ctx.qlen = ring->count;
3003 /* use 32 byte descriptors */
3006 /* descriptor type is always zero
3009 rx_ctx.hsplit_0 = 0;
3011 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3012 if (hw->revision_id == 0)
3013 rx_ctx.lrxqthresh = 0;
3015 rx_ctx.lrxqthresh = 2;
3016 rx_ctx.crcstrip = 1;
3018 /* this controls whether VLAN is stripped from inner headers */
3020 /* set the prefena field to 1 because the manual says to */
3023 /* clear the context in the HMC */
3024 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3026 dev_info(&vsi->back->pdev->dev,
3027 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3028 ring->queue_index, pf_q, err);
3032 /* set the context in the HMC */
3033 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3035 dev_info(&vsi->back->pdev->dev,
3036 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3037 ring->queue_index, pf_q, err);
3041 /* cache tail for quicker writes, and clear the reg before use */
3042 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3043 writel(0, ring->tail);
3045 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
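/* Illustrative sketch (not part of the driver): the Rx context conversions
 * used above. The buffer size is programmed in units of
 * BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes, and the maximum receivable frame is
 * capped by what one buffer chain can hold. Hypothetical names.
 */
static inline u16 i40e_example_rx_dbuff(u16 rx_buf_len)
{
	return DIV_ROUND_UP(rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
}

static inline u16 i40e_example_rx_max(u16 max_frame, u32 chain_len,
				      u16 rx_buf_len)
{
	return min_t(u16, max_frame, chain_len * rx_buf_len);
}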
3051 * i40e_vsi_configure_tx - Configure the VSI for Tx
3052 * @vsi: VSI structure describing this set of rings and resources
3054 * Configure the Tx VSI for operation.
3056 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3061 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3062 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3068 * i40e_vsi_configure_rx - Configure the VSI for Rx
3069 * @vsi: the VSI being configured
3071 * Configure the Rx VSI for operation.
3073 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3078 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3079 vsi->max_frame = I40E_MAX_RXBUFFER;
3080 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3081 #if (PAGE_SIZE < 8192)
3082 } else if (vsi->netdev->mtu <= ETH_DATA_LEN) {
3083 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3084 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3087 vsi->max_frame = I40E_MAX_RXBUFFER;
3088 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3091 /* set up individual rings */
3092 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3093 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3099 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3100 * @vsi: ptr to the VSI
3102 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3104 struct i40e_ring *tx_ring, *rx_ring;
3105 u16 qoffset, qcount;
3108 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3109 /* Reset the TC information */
3110 for (i = 0; i < vsi->num_queue_pairs; i++) {
3111 rx_ring = vsi->rx_rings[i];
3112 tx_ring = vsi->tx_rings[i];
3113 rx_ring->dcb_tc = 0;
3114 tx_ring->dcb_tc = 0;
3118 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3119 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3122 qoffset = vsi->tc_config.tc_info[n].qoffset;
3123 qcount = vsi->tc_config.tc_info[n].qcount;
3124 for (i = qoffset; i < (qoffset + qcount); i++) {
3125 rx_ring = vsi->rx_rings[i];
3126 tx_ring = vsi->tx_rings[i];
3127 rx_ring->dcb_tc = n;
3128 tx_ring->dcb_tc = n;
3134 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3135 * @vsi: ptr to the VSI
3137 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3139 struct i40e_pf *pf = vsi->back;
3143 i40e_set_rx_mode(vsi->netdev);
3145 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
3146 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
3148 dev_warn(&pf->pdev->dev,
3149 "could not set up macaddr; err %d\n", err);
3155 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3156 * @vsi: Pointer to the targeted VSI
3158 * This function replays on the hardware the hlist where all the SB Flow
3159 * Director filters were saved.
3161 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3163 struct i40e_fdir_filter *filter;
3164 struct i40e_pf *pf = vsi->back;
3165 struct hlist_node *node;
3167 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3170 /* Reset FDir counters as we're replaying all existing filters */
3171 pf->fd_tcp4_filter_cnt = 0;
3172 pf->fd_udp4_filter_cnt = 0;
3173 pf->fd_sctp4_filter_cnt = 0;
3174 pf->fd_ip4_filter_cnt = 0;
3176 hlist_for_each_entry_safe(filter, node,
3177 &pf->fdir_filter_list, fdir_node) {
3178 i40e_add_del_fdir(vsi, filter, true);
3183 * i40e_vsi_configure - Set up the VSI for action
3184 * @vsi: the VSI being configured
3186 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3190 i40e_set_vsi_rx_mode(vsi);
3191 i40e_restore_vlan(vsi);
3192 i40e_vsi_config_dcb_rings(vsi);
3193 err = i40e_vsi_configure_tx(vsi);
3195 err = i40e_vsi_configure_rx(vsi);
3201 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3202 * @vsi: the VSI being configured
3204 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3206 struct i40e_pf *pf = vsi->back;
3207 struct i40e_hw *hw = &pf->hw;
3212 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3213 * and PFINT_LNKLSTn registers, e.g.:
3214 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3216 qp = vsi->base_queue;
3217 vector = vsi->base_vector;
3218 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3219 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3221 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3222 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3223 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3224 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3226 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3227 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3228 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3230 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3231 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3233 /* Linked list for the queuepairs assigned to this vector */
3234 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3235 for (q = 0; q < q_vector->num_ringpairs; q++) {
3238 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3239 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3240 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3241 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3243 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3245 wr32(hw, I40E_QINT_RQCTL(qp), val);
3247 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3248 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3249 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3250 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
3252 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3254 /* Terminate the linked list */
3255 if (q == (q_vector->num_ringpairs - 1))
3256 val |= (I40E_QUEUE_END_OF_LIST
3257 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3259 wr32(hw, I40E_QINT_TQCTL(qp), val);
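/* Illustrative sketch (not part of the driver): the off-by-one register
 * indexing described in the comment at the top of this function. MSI-X
 * vector 0 is reserved for the "other causes" interrupt, so queue-pair
 * vector n programs PFINT_ITRn/PFINT_LNKLSTN entry n - 1. Hypothetical name.
 */
static inline u32 i40e_example_qvector_reg_idx(u32 vector)
{
	return vector - 1;
}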
3268 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3269 * @pf: board private structure
3271 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3273 struct i40e_hw *hw = &pf->hw;
3276 /* clear things first */
3277 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3278 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3280 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3281 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3282 I40E_PFINT_ICR0_ENA_GRST_MASK |
3283 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3284 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3285 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3286 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3287 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3289 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3290 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3292 if (pf->flags & I40E_FLAG_PTP)
3293 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3295 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3297 /* SW_ITR_IDX = 0, but don't change INTENA */
3298 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3299 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3301 /* OTHER_ITR_IDX = 0 */
3302 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3306 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3307 * @vsi: the VSI being configured
3309 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3311 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3312 struct i40e_pf *pf = vsi->back;
3313 struct i40e_hw *hw = &pf->hw;
3316 /* set the ITR configuration */
3317 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3318 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3319 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3320 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3321 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3322 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3323 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3325 i40e_enable_misc_int_causes(pf);
3327 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3328 wr32(hw, I40E_PFINT_LNKLST0, 0);
3330 /* Associate the queue pair to the vector and enable the queue int */
3331 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3332 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3333 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3335 wr32(hw, I40E_QINT_RQCTL(0), val);
3337 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3338 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3339 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3341 wr32(hw, I40E_QINT_TQCTL(0), val);
3346 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3347 * @pf: board private structure
3349 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3351 struct i40e_hw *hw = &pf->hw;
3353 wr32(hw, I40E_PFINT_DYN_CTL0,
3354 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3359 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3360 * @pf: board private structure
3361 * @clearpba: true when all pending interrupt events should be cleared
3363 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3365 struct i40e_hw *hw = &pf->hw;
3368 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3369 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3370 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3372 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3377 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3378 * @irq: interrupt number
3379 * @data: pointer to a q_vector
3381 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3383 struct i40e_q_vector *q_vector = data;
3385 if (!q_vector->tx.ring && !q_vector->rx.ring)
3388 napi_schedule_irqoff(&q_vector->napi);
3394 * i40e_irq_affinity_notify - Callback for affinity changes
3395 * @notify: context as to what irq was changed
3396 * @mask: the new affinity mask
3398 * This is a callback function used by the irq_set_affinity_notifier function
3399 * so that we may register to receive changes to the irq affinity masks.
3401 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3402 const cpumask_t *mask)
3404 struct i40e_q_vector *q_vector =
3405 container_of(notify, struct i40e_q_vector, affinity_notify);
3407 q_vector->affinity_mask = *mask;
3411 * i40e_irq_affinity_release - Callback for affinity notifier release
3412 * @ref: internal core kernel usage
3414 * This is a callback function used by the irq_set_affinity_notifier function
3415 * to inform the current notification subscriber that they will no longer
3416 * receive notifications.
3418 static void i40e_irq_affinity_release(struct kref *ref) {}
3421 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3422 * @vsi: the VSI being configured
3423 * @basename: name for the vector
3425 * Allocates MSI-X vectors and requests interrupts from the kernel.
3427 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3429 int q_vectors = vsi->num_q_vectors;
3430 struct i40e_pf *pf = vsi->back;
3431 int base = vsi->base_vector;
3437 for (vector = 0; vector < q_vectors; vector++) {
3438 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3440 irq_num = pf->msix_entries[base + vector].vector;
3442 if (q_vector->tx.ring && q_vector->rx.ring) {
3443 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3444 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3446 } else if (q_vector->rx.ring) {
3447 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3448 "%s-%s-%d", basename, "rx", rx_int_idx++);
3449 } else if (q_vector->tx.ring) {
3450 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3451 "%s-%s-%d", basename, "tx", tx_int_idx++);
3453 /* skip this unused q_vector */
3456 err = request_irq(irq_num,
3462 dev_info(&pf->pdev->dev,
3463 "MSIX request_irq failed, error: %d\n", err);
3464 goto free_queue_irqs;
3467 /* register for affinity change notifications */
3468 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3469 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3470 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3471 /* assign the mask for this irq */
3472 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
3475 vsi->irqs_ready = true;
3481 irq_num = pf->msix_entries[base + vector].vector;
3482 irq_set_affinity_notifier(irq_num, NULL);
3483 irq_set_affinity_hint(irq_num, NULL);
3484 free_irq(irq_num, &vsi->q_vectors[vector]);
3490 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3491 * @vsi: the VSI being un-configured
3493 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3495 struct i40e_pf *pf = vsi->back;
3496 struct i40e_hw *hw = &pf->hw;
3497 int base = vsi->base_vector;
3500 for (i = 0; i < vsi->num_queue_pairs; i++) {
3501 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3502 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3505 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3506 for (i = vsi->base_vector;
3507 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3508 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3511 for (i = 0; i < vsi->num_q_vectors; i++)
3512 synchronize_irq(pf->msix_entries[i + base].vector);
3514 /* Legacy and MSI mode - this stops all interrupt handling */
3515 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3516 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3518 synchronize_irq(pf->pdev->irq);
3523 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3524 * @vsi: the VSI being configured
3526 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3528 struct i40e_pf *pf = vsi->back;
3531 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3532 for (i = 0; i < vsi->num_q_vectors; i++)
3533 i40e_irq_dynamic_enable(vsi, i);
3535 i40e_irq_dynamic_enable_icr0(pf, true);
3538 i40e_flush(&pf->hw);
3543 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3544 * @pf: board private structure
3546 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3549 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3550 i40e_flush(&pf->hw);
3554 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3555 * @irq: interrupt number
3556 * @data: pointer to a q_vector
3558 * This is the handler used for all MSI/Legacy interrupts, and deals
3559 * with both queue and non-queue interrupts. This is also used in
3560 * MSIX mode to handle the non-queue interrupts.
3562 static irqreturn_t i40e_intr(int irq, void *data)
3564 struct i40e_pf *pf = (struct i40e_pf *)data;
3565 struct i40e_hw *hw = &pf->hw;
3566 irqreturn_t ret = IRQ_NONE;
3567 u32 icr0, icr0_remaining;
3570 icr0 = rd32(hw, I40E_PFINT_ICR0);
3571 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3573 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3574 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3577 /* if interrupt but no bits showing, must be SWINT */
3578 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3579 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3582 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3583 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3584 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3585 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3586 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3589 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3590 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3591 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3592 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3594 /* We do not have a way to disarm Queue causes while leaving
3595 * the interrupt enabled for all other causes; ideally the
3596 * interrupt should be disabled while we are in NAPI, but
3597 * this is not a performance path and napi_schedule()
3598 * can deal with rescheduling.
3600 if (!test_bit(__I40E_DOWN, &pf->state))
3601 napi_schedule_irqoff(&q_vector->napi);
3604 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3605 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3606 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3607 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3610 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3611 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3612 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3615 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3616 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3617 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3620 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3621 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3622 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3623 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3624 val = rd32(hw, I40E_GLGEN_RSTAT);
3625 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3626 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3627 if (val == I40E_RESET_CORER) {
3629 } else if (val == I40E_RESET_GLOBR) {
3631 } else if (val == I40E_RESET_EMPR) {
3633 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3637 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3638 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3639 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3640 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3641 rd32(hw, I40E_PFHMC_ERRORINFO),
3642 rd32(hw, I40E_PFHMC_ERRORDATA));
3645 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3646 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3648 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3649 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3650 i40e_ptp_tx_hwtstamp(pf);
3654 /* If a critical error is pending we have no choice but to reset the
3656 * Report and mask out any remaining unexpected interrupts.
3658 icr0_remaining = icr0 & ena_mask;
3659 if (icr0_remaining) {
3660 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3662 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3663 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3664 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3665 dev_info(&pf->pdev->dev, "device will be reset\n");
3666 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3667 i40e_service_event_schedule(pf);
3669 ena_mask &= ~icr0_remaining;
3674 /* re-enable interrupt causes */
3675 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3676 if (!test_bit(__I40E_DOWN, &pf->state)) {
3677 i40e_service_event_schedule(pf);
3678 i40e_irq_dynamic_enable_icr0(pf, false);
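/* Illustrative sketch (not part of the driver): how i40e_intr() spots
 * unexpected causes. Each handled cause clears its bit from ena_mask, so
 * whatever remains set in both icr0 and ena_mask was never claimed by a
 * handler and gets reported before being masked out. Hypothetical name.
 */
static inline u32 i40e_example_unhandled_causes(u32 icr0, u32 ena_mask)
{
	return icr0 & ena_mask;
}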
3685 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3686 * @tx_ring: tx ring to clean
3687 * @budget: how many cleans we're allowed
3689 * Returns true if there's any budget left (i.e. the clean is finished)
3691 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3693 struct i40e_vsi *vsi = tx_ring->vsi;
3694 u16 i = tx_ring->next_to_clean;
3695 struct i40e_tx_buffer *tx_buf;
3696 struct i40e_tx_desc *tx_desc;
3698 tx_buf = &tx_ring->tx_bi[i];
3699 tx_desc = I40E_TX_DESC(tx_ring, i);
3700 i -= tx_ring->count;
3703 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3705 /* if next_to_watch is not set then there is no work pending */
3709 /* prevent any other reads prior to eop_desc */
3710 read_barrier_depends();
3712 /* if the descriptor isn't done, no work yet to do */
3713 if (!(eop_desc->cmd_type_offset_bsz &
3714 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3717 /* clear next_to_watch to prevent false hangs */
3718 tx_buf->next_to_watch = NULL;
3720 tx_desc->buffer_addr = 0;
3721 tx_desc->cmd_type_offset_bsz = 0;
3722 /* move past filter desc */
3727 i -= tx_ring->count;
3728 tx_buf = tx_ring->tx_bi;
3729 tx_desc = I40E_TX_DESC(tx_ring, 0);
3731 /* unmap skb header data */
3732 dma_unmap_single(tx_ring->dev,
3733 dma_unmap_addr(tx_buf, dma),
3734 dma_unmap_len(tx_buf, len),
3736 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3737 kfree(tx_buf->raw_buf);
3739 tx_buf->raw_buf = NULL;
3740 tx_buf->tx_flags = 0;
3741 tx_buf->next_to_watch = NULL;
3742 dma_unmap_len_set(tx_buf, len, 0);
3743 tx_desc->buffer_addr = 0;
3744 tx_desc->cmd_type_offset_bsz = 0;
3746 /* move us past the eop_desc for start of next FD desc */
3751 i -= tx_ring->count;
3752 tx_buf = tx_ring->tx_bi;
3753 tx_desc = I40E_TX_DESC(tx_ring, 0);
3756 /* update budget accounting */
3758 } while (likely(budget));
3760 i += tx_ring->count;
3761 tx_ring->next_to_clean = i;
3763 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3764 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
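/* Illustrative sketch (not part of the driver): the biased index walk used in
 * i40e_clean_fdir_tx_irq() above. next_to_clean is offset by -count up front
 * so the hot loop can use a bare increment; the index wraps back to the start
 * of the ring exactly when the biased value reaches zero, and the real index
 * is recovered at the end by adding count back. Hypothetical name.
 */
static inline u16 i40e_example_next_biased_idx(u16 biased, u16 count)
{
	biased++;
	if (!biased)		/* walked off the end of the ring */
		biased -= count;
	return biased;
}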
3770 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3771 * @irq: interrupt number
3772 * @data: pointer to a q_vector
3774 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3776 struct i40e_q_vector *q_vector = data;
3777 struct i40e_vsi *vsi;
3779 if (!q_vector->tx.ring)
3782 vsi = q_vector->tx.ring->vsi;
3783 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3789 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3790 * @vsi: the VSI being configured
3791 * @v_idx: vector index
3792 * @qp_idx: queue pair index
3794 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3796 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3797 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3798 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3800 tx_ring->q_vector = q_vector;
3801 tx_ring->next = q_vector->tx.ring;
3802 q_vector->tx.ring = tx_ring;
3803 q_vector->tx.count++;
3805 rx_ring->q_vector = q_vector;
3806 rx_ring->next = q_vector->rx.ring;
3807 q_vector->rx.ring = rx_ring;
3808 q_vector->rx.count++;
3812 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3813 * @vsi: the VSI being configured
3815 * This function maps descriptor rings to the queue-specific vectors
3816 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3817 * one vector per queue pair, but on a constrained vector budget, we
3818 * group the queue pairs as "efficiently" as possible.
3820 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3822 int qp_remaining = vsi->num_queue_pairs;
3823 int q_vectors = vsi->num_q_vectors;
3828 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3829 * group them so there are multiple queues per vector.
3830 * It is also important to go through all the vectors available to be
3831 * sure that, if we don't use all of them, the remaining vectors
3832 * are cleared. This is especially important when decreasing the
3833 * number of queues in use.
3835 for (; v_start < q_vectors; v_start++) {
3836 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3838 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3840 q_vector->num_ringpairs = num_ringpairs;
3842 q_vector->rx.count = 0;
3843 q_vector->tx.count = 0;
3844 q_vector->rx.ring = NULL;
3845 q_vector->tx.ring = NULL;
3847 while (num_ringpairs--) {
3848 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
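/* Illustrative sketch (not part of the driver): the queue-pair spreading rule
 * used by the loop above. Each remaining vector takes the ceiling of the
 * remaining pairs over the remaining vectors, which keeps the distribution as
 * even as possible when there are fewer vectors than queue pairs.
 * Hypothetical name.
 */
static inline int i40e_example_ringpairs_for_vector(int qp_remaining,
						    int vectors_left)
{
	return DIV_ROUND_UP(qp_remaining, vectors_left);
}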
3856 * i40e_vsi_request_irq - Request IRQ from the OS
3857 * @vsi: the VSI being configured
3858 * @basename: name for the vector
3860 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3862 struct i40e_pf *pf = vsi->back;
3865 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3866 err = i40e_vsi_request_irq_msix(vsi, basename);
3867 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3868 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3871 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3875 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3880 #ifdef CONFIG_NET_POLL_CONTROLLER
3882 * i40e_netpoll - A Polling 'interrupt' handler
3883 * @netdev: network interface device structure
3885 * This is used by netconsole to send skbs without having to re-enable
3886 * interrupts. It's not called while the normal interrupt routine is executing.
3888 static void i40e_netpoll(struct net_device *netdev)
3890 struct i40e_netdev_priv *np = netdev_priv(netdev);
3891 struct i40e_vsi *vsi = np->vsi;
3892 struct i40e_pf *pf = vsi->back;
3895 /* if interface is down do nothing */
3896 if (test_bit(__I40E_DOWN, &vsi->state))
3899 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3900 for (i = 0; i < vsi->num_q_vectors; i++)
3901 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3903 i40e_intr(pf->pdev->irq, netdev);
3909 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3910 * @pf: the PF being configured
3911 * @pf_q: the PF queue
3912 * @enable: enable or disable state of the queue
3914 * This routine will wait for the given Tx queue of the PF to reach the
3915 * enabled or disabled state.
3916 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3917 * multiple retries; else will return 0 in case of success.
3919 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3924 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3925 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3926 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3929 usleep_range(10, 20);
3931 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
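/* Illustrative sketch (not part of the driver): the bounded-poll pattern the
 * queue-wait helpers use -- re-check a condition a fixed number of times with
 * a short sleep in between, and report -ETIMEDOUT if it never becomes true.
 * Hypothetical names; the predicate callback is purely for illustration.
 */
static inline int i40e_example_poll(bool (*done)(void *ctx), void *ctx,
				    int retries)
{
	int i;

	for (i = 0; i < retries; i++) {
		if (done(ctx))
			return 0;
		usleep_range(10, 20);
	}

	return -ETIMEDOUT;
}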
3938 * i40e_vsi_control_tx - Start or stop a VSI's rings
3939 * @vsi: the VSI being configured
3940 * @enable: start or stop the rings
3942 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3944 struct i40e_pf *pf = vsi->back;
3945 struct i40e_hw *hw = &pf->hw;
3946 int i, j, pf_q, ret = 0;
3949 pf_q = vsi->base_queue;
3950 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3952 /* warn the TX unit of coming changes */
3953 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3955 usleep_range(10, 20);
3957 for (j = 0; j < 50; j++) {
3958 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3959 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3960 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3962 usleep_range(1000, 2000);
3964 /* Skip if the queue is already in the requested state */
3965 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3968 /* turn on/off the queue */
3970 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3971 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3973 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3976 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3977 /* No waiting for the Tx queue to disable */
3978 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3981 /* wait for the change to finish */
3982 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3984 dev_info(&pf->pdev->dev,
3985 "VSI seid %d Tx ring %d %sable timeout\n",
3986 vsi->seid, pf_q, (enable ? "en" : "dis"));
3995 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3996 * @pf: the PF being configured
3997 * @pf_q: the PF queue
3998 * @enable: enable or disable state of the queue
4000 * This routine will wait for the given Rx queue of the PF to reach the
4001 * enabled or disabled state.
4002 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4003 * multiple retries; else will return 0 in case of success.
4005 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4010 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4011 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4012 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4015 usleep_range(10, 20);
4017 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4024 * i40e_vsi_control_rx - Start or stop a VSI's rings
4025 * @vsi: the VSI being configured
4026 * @enable: start or stop the rings
4028 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4030 struct i40e_pf *pf = vsi->back;
4031 struct i40e_hw *hw = &pf->hw;
4032 int i, j, pf_q, ret = 0;
4035 pf_q = vsi->base_queue;
4036 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4037 for (j = 0; j < 50; j++) {
4038 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4039 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4040 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4042 usleep_range(1000, 2000);
4045 /* Skip if the queue is already in the requested state */
4046 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4049 /* turn on/off the queue */
4051 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4053 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4054 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4055 /* No waiting for the Rx queue to disable */
4056 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
4059 /* wait for the change to finish */
4060 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4062 dev_info(&pf->pdev->dev,
4063 "VSI seid %d Rx ring %d %sable timeout\n",
4064 vsi->seid, pf_q, (enable ? "en" : "dis"));
4069 /* Due to HW errata, on Rx disable only, the register can indicate done
4070 * before it really is. Needs 50ms to be sure
4079 * i40e_vsi_start_rings - Start a VSI's rings
4080 * @vsi: the VSI being configured
4082 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4086 /* do rx first for enable and last for disable */
4087 ret = i40e_vsi_control_rx(vsi, true);
4090 ret = i40e_vsi_control_tx(vsi, true);
4096 * i40e_vsi_stop_rings - Stop a VSI's rings
4097 * @vsi: the VSI being configured
4099 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4101 /* do rx first for enable and last for disable
4102 * Ignore return value, we need to shutdown whatever we can
4104 i40e_vsi_control_tx(vsi, false);
4105 i40e_vsi_control_rx(vsi, false);
4109 * i40e_vsi_free_irq - Free the irq association with the OS
4110 * @vsi: the VSI being configured
4112 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4114 struct i40e_pf *pf = vsi->back;
4115 struct i40e_hw *hw = &pf->hw;
4116 int base = vsi->base_vector;
4120 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4121 if (!vsi->q_vectors)
4124 if (!vsi->irqs_ready)
4127 vsi->irqs_ready = false;
4128 for (i = 0; i < vsi->num_q_vectors; i++) {
4133 irq_num = pf->msix_entries[vector].vector;
4135 /* free only the irqs that were actually requested */
4136 if (!vsi->q_vectors[i] ||
4137 !vsi->q_vectors[i]->num_ringpairs)
4140 /* clear the affinity notifier in the IRQ descriptor */
4141 irq_set_affinity_notifier(irq_num, NULL);
4142 /* clear the affinity_mask in the IRQ descriptor */
4143 irq_set_affinity_hint(irq_num, NULL);
4144 synchronize_irq(irq_num);
4145 free_irq(irq_num, vsi->q_vectors[i]);
4147 /* Tear down the interrupt queue link list
4149 * We know that they come in pairs and always
4150 * the Rx first, then the Tx. To clear the
4151 * link list, stick the EOL value into the
4152 * next_q field of the registers.
4154 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4155 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4156 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4157 val |= I40E_QUEUE_END_OF_LIST
4158 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4159 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4161 while (qp != I40E_QUEUE_END_OF_LIST) {
4164 val = rd32(hw, I40E_QINT_RQCTL(qp));
4166 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4167 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4168 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4169 I40E_QINT_RQCTL_INTEVENT_MASK);
4171 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4172 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4174 wr32(hw, I40E_QINT_RQCTL(qp), val);
4176 val = rd32(hw, I40E_QINT_TQCTL(qp));
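/* Capture the next queue pair index before this entry's NEXTQ field
 * is overwritten below, so the walk can continue down the list.
 */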
4178 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4179 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4181 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4182 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4183 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4184 I40E_QINT_TQCTL_INTEVENT_MASK);
4186 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4187 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4189 wr32(hw, I40E_QINT_TQCTL(qp), val);
4194 free_irq(pf->pdev->irq, pf);
4196 val = rd32(hw, I40E_PFINT_LNKLST0);
4197 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4198 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4199 val |= I40E_QUEUE_END_OF_LIST
4200 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4201 wr32(hw, I40E_PFINT_LNKLST0, val);
4203 val = rd32(hw, I40E_QINT_RQCTL(qp));
4204 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4205 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4206 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4207 I40E_QINT_RQCTL_INTEVENT_MASK);
4209 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4210 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4212 wr32(hw, I40E_QINT_RQCTL(qp), val);
4214 val = rd32(hw, I40E_QINT_TQCTL(qp));
4216 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4217 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4218 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4219 I40E_QINT_TQCTL_INTEVENT_MASK);
4221 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4222 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4224 wr32(hw, I40E_QINT_TQCTL(qp), val);
4229 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4230 * @vsi: the VSI being configured
4231 * @v_idx: Index of vector to be freed
4233 * This function frees the memory allocated to the q_vector. In addition if
4234 * NAPI is enabled it will delete any references to the NAPI struct prior
4235 * to freeing the q_vector.
4237 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4239 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4240 struct i40e_ring *ring;
4245 /* disassociate q_vector from rings */
4246 i40e_for_each_ring(ring, q_vector->tx)
4247 ring->q_vector = NULL;
4249 i40e_for_each_ring(ring, q_vector->rx)
4250 ring->q_vector = NULL;
4252 /* only VSI w/ an associated netdev is set up w/ NAPI */
4254 netif_napi_del(&q_vector->napi);
4256 vsi->q_vectors[v_idx] = NULL;
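/* Defer freeing the q_vector until after an RCU grace period, since it
 * may still be referenced by in-flight RCU readers.
 */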
4258 kfree_rcu(q_vector, rcu);
4262 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4263 * @vsi: the VSI being un-configured
4265 * This frees the memory allocated to the q_vectors and
4266 * deletes references to the NAPI struct.
4268 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4272 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4273 i40e_free_q_vector(vsi, v_idx);
4277 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4278 * @pf: board private structure
4280 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4282 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4283 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4284 pci_disable_msix(pf->pdev);
4285 kfree(pf->msix_entries);
4286 pf->msix_entries = NULL;
4287 kfree(pf->irq_pile);
4288 pf->irq_pile = NULL;
4289 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4290 pci_disable_msi(pf->pdev);
4292 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4296 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4297 * @pf: board private structure
4299 * We go through and clear interrupt specific resources and reset the structure
4300 * to pre-load conditions
4302 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4306 i40e_stop_misc_vector(pf);
4307 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4308 synchronize_irq(pf->msix_entries[0].vector);
4309 free_irq(pf->msix_entries[0].vector, pf);
4312 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4313 I40E_IWARP_IRQ_PILE_ID);
4315 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4316 for (i = 0; i < pf->num_alloc_vsi; i++)
4318 i40e_vsi_free_q_vectors(pf->vsi[i]);
4319 i40e_reset_interrupt_capability(pf);
4323 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4324 * @vsi: the VSI being configured
4326 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4333 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4334 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4336 if (q_vector->rx.ring || q_vector->tx.ring)
4337 napi_enable(&q_vector->napi);
4342 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4343 * @vsi: the VSI being configured
4345 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4352 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4353 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4355 if (q_vector->rx.ring || q_vector->tx.ring)
4356 napi_disable(&q_vector->napi);
4361 * i40e_vsi_close - Shut down a VSI
4362 * @vsi: the vsi to be quelled
4364 static void i40e_vsi_close(struct i40e_vsi *vsi)
4366 struct i40e_pf *pf = vsi->back;
4367 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4369 i40e_vsi_free_irq(vsi);
4370 i40e_vsi_free_tx_resources(vsi);
4371 i40e_vsi_free_rx_resources(vsi);
4372 vsi->current_netdev_flags = 0;
4373 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
4374 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4375 pf->flags |= I40E_FLAG_CLIENT_RESET;
4379 * i40e_quiesce_vsi - Pause a given VSI
4380 * @vsi: the VSI being paused
4382 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4384 if (test_bit(__I40E_DOWN, &vsi->state))
4387 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4388 if (vsi->netdev && netif_running(vsi->netdev))
4389 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4391 i40e_vsi_close(vsi);
4395 * i40e_unquiesce_vsi - Resume a given VSI
4396 * @vsi: the VSI being resumed
4398 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4400 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4403 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4404 if (vsi->netdev && netif_running(vsi->netdev))
4405 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4407 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4411 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4414 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4418 for (v = 0; v < pf->num_alloc_vsi; v++) {
4420 i40e_quiesce_vsi(pf->vsi[v]);
4425 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4428 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4432 for (v = 0; v < pf->num_alloc_vsi; v++) {
4434 i40e_unquiesce_vsi(pf->vsi[v]);
4438 #ifdef CONFIG_I40E_DCB
4440 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4441 * @vsi: the VSI being configured
4443 * This function waits for the given VSI's queues to be disabled.
4445 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4447 struct i40e_pf *pf = vsi->back;
4450 pf_q = vsi->base_queue;
4451 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4452 /* Check and wait for the disable status of the queue */
4453 ret = i40e_pf_txq_wait(pf, pf_q, false);
4455 dev_info(&pf->pdev->dev,
4456 "VSI seid %d Tx ring %d disable timeout\n",
4462 pf_q = vsi->base_queue;
4463 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4464 /* Check and wait for the disable status of the queue */
4465 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4467 dev_info(&pf->pdev->dev,
4468 "VSI seid %d Rx ring %d disable timeout\n",
4478 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4481 * This function waits for the queues to be in disabled state for all the
4482 * VSIs that are managed by this PF.
4484 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4488 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4490 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4502 * i40e_detect_recover_hung_queue - Detect and recover a hung Tx queue
4503 * @q_idx: TX queue number
4504 * @vsi: Pointer to VSI struct
4506 * This function checks the specified queue of the given VSI for a hung condition.
4507 * We proactively detect hung TX queues by checking if interrupts are disabled
4508 * but there are pending descriptors. If it appears hung, attempt to recover
4509 * by triggering a SW interrupt.
4511 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4513 struct i40e_ring *tx_ring = NULL;
4515 u32 val, tx_pending;
4520 /* now that we have an index, find the tx_ring struct */
4521 for (i = 0; i < vsi->num_queue_pairs; i++) {
4522 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4523 if (q_idx == vsi->tx_rings[i]->queue_index) {
4524 tx_ring = vsi->tx_rings[i];
4533 /* Read interrupt register */
4534 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4536 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4537 tx_ring->vsi->base_vector - 1));
4539 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4541 tx_pending = i40e_get_tx_pending(tx_ring);
4543 /* If interrupts are disabled and Tx descriptors are still pending,
4544 * trigger the SW interrupt (don't wait). Worst case there will be
4545 * one extra interrupt, which may find nothing to clean because the
4546 * queues were already cleaned.
4548 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4549 i40e_force_wb(vsi, tx_ring->q_vector);
4553 * i40e_detect_recover_hung - Detect and recover hung queues
4554 * @pf: pointer to PF struct
4556 * The LAN VSI has a netdev and the netdev has Tx queues. This function
4557 * checks each of those Tx queues; if one appears hung, it triggers recovery by issuing
4560 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4562 struct net_device *netdev;
4563 struct i40e_vsi *vsi;
4566 /* Only for LAN VSI */
4567 vsi = pf->vsi[pf->lan_vsi];
4572 /* Make sure VSI state is not DOWN/RECOVERY_PENDING */
4573 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4574 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4577 /* Make sure type is MAIN VSI */
4578 if (vsi->type != I40E_VSI_MAIN)
4581 netdev = vsi->netdev;
4585 /* Bail out if netif_carrier is not OK */
4586 if (!netif_carrier_ok(netdev))
4589 /* Go through the netdev's Tx queues */
4590 for (i = 0; i < netdev->num_tx_queues; i++) {
4591 struct netdev_queue *q;
4593 q = netdev_get_tx_queue(netdev, i);
4595 i40e_detect_recover_hung_queue(i, vsi);
4600 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4601 * @pf: pointer to PF
4603 * Get the TC map for an iSCSI-enabled PF; the map will include the iSCSI TC
4606 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4608 struct i40e_dcb_app_priority_table app;
4609 struct i40e_hw *hw = &pf->hw;
4610 u8 enabled_tc = 1; /* TC0 is always enabled */
4612 /* Get the iSCSI APP TLV */
4613 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4615 for (i = 0; i < dcbcfg->numapps; i++) {
4616 app = dcbcfg->app[i];
4617 if (app.selector == I40E_APP_SEL_TCPIP &&
4618 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4619 tc = dcbcfg->etscfg.prioritytable[app.priority];
4620 enabled_tc |= BIT(tc);
4629 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4630 * @dcbcfg: the corresponding DCBx configuration structure
4632 * Return the number of TCs from given DCBx configuration
4634 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4636 int i, tc_unused = 0;
4640 /* Scan the ETS Config Priority Table to find
4641 * traffic class enabled for a given priority
4642 * and create a bitmask of enabled TCs
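 * e.g. a priority table of {0, 0, 1, 1, 0, 0, 0, 0} sets bits for TC0 and
 * TC1 only, yielding num_tc = 2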
4644 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4645 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4647 /* Now scan the bitmask to check for
4648 * contiguous TCs starting with TC0
4650 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4651 if (num_tc & BIT(i)) {
4655 pr_err("Non-contiguous TC - Disabling DCB\n");
4663 /* There is always at least TC0 */
4671 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4672 * @dcbcfg: the corresponding DCBx configuration structure
4674 * Query the current DCB configuration and return a bitmap of the
4675 * traffic classes enabled in the given DCBX config
4677 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4679 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
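/* e.g. num_tc = 3 yields enabled_tc = 0x7, i.e. TC0, TC1 and TC2 */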
4683 for (i = 0; i < num_tc; i++)
4684 enabled_tc |= BIT(i);
4690 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4691 * @pf: PF being queried
4693 * Return number of traffic classes enabled for the given PF
4695 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4697 struct i40e_hw *hw = &pf->hw;
4698 u8 i, enabled_tc = 1;
4700 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4702 /* If DCB is not enabled then always in single TC */
4703 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4706 /* SFP mode will be enabled for all TCs on port */
4707 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4708 return i40e_dcb_get_num_tc(dcbcfg);
4710 /* In MFP mode, return the count of TCs enabled for this PF */
4711 if (pf->hw.func_caps.iscsi)
4712 enabled_tc = i40e_get_iscsi_tc_map(pf);
4714 return 1; /* Only TC0 */
4716 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4717 if (enabled_tc & BIT(i))
4724 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4725 * @pf: PF being queried
4727 * Return a bitmap for enabled traffic classes for this PF.
4729 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4731 /* If DCB is not enabled for this PF then just return default TC */
4732 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4733 return I40E_DEFAULT_TRAFFIC_CLASS;
4735 /* In SFP mode we want the PF to be enabled for all TCs */
4736 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4737 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4739 /* MFP enabled and iSCSI PF type */
4740 if (pf->hw.func_caps.iscsi)
4741 return i40e_get_iscsi_tc_map(pf);
4743 return I40E_DEFAULT_TRAFFIC_CLASS;
4747 * i40e_vsi_get_bw_info - Query VSI BW Information
4748 * @vsi: the VSI being queried
4750 * Returns 0 on success, negative value on failure
4752 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4754 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4755 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4756 struct i40e_pf *pf = vsi->back;
4757 struct i40e_hw *hw = &pf->hw;
4762 /* Get the VSI level BW configuration */
4763 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4765 dev_info(&pf->pdev->dev,
4766 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4767 i40e_stat_str(&pf->hw, ret),
4768 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4772 /* Get the VSI level BW configuration per TC */
4773 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4776 dev_info(&pf->pdev->dev,
4777 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4778 i40e_stat_str(&pf->hw, ret),
4779 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4783 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4784 dev_info(&pf->pdev->dev,
4785 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4786 bw_config.tc_valid_bits,
4787 bw_ets_config.tc_valid_bits);
4788 /* Still continuing */
4791 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4792 vsi->bw_max_quanta = bw_config.max_bw;
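/* tc_bw_max is reported as two le16 words; merge them into one 32-bit
 * value carrying a 4-bit field per TC.
 */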
4793 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4794 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4795 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4796 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4797 vsi->bw_ets_limit_credits[i] =
4798 le16_to_cpu(bw_ets_config.credits[i]);
4799 /* 3 bits out of 4 for each TC */
4800 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4807 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4808 * @vsi: the VSI being configured
4809 * @enabled_tc: TC bitmap
4810 * @bw_credits: BW shared credits per TC
4812 * Returns 0 on success, negative value on failure
4814 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4817 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4821 bw_data.tc_valid_bits = enabled_tc;
4822 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4823 bw_data.tc_bw_credits[i] = bw_share[i];
4825 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4828 dev_info(&vsi->back->pdev->dev,
4829 "AQ command Config VSI BW allocation per TC failed = %d\n",
4830 vsi->back->hw.aq.asq_last_status);
4834 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4835 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4841 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4842 * @vsi: the VSI being configured
4843 * @enabled_tc: TC map to be enabled
4846 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4848 struct net_device *netdev = vsi->netdev;
4849 struct i40e_pf *pf = vsi->back;
4850 struct i40e_hw *hw = &pf->hw;
4853 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4859 netdev_reset_tc(netdev);
4863 /* Set up actual enabled TCs on the VSI */
4864 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4867 /* set per TC queues for the VSI */
4868 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4869 /* Only set TC queues for enabled TCs
4871 * e.g. for a VSI that has TC0 and TC3 enabled, the
4872 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
4873 * will set numtc for the netdev to 2, and these TCs will be
4874 * referenced by the netdev layer as TC 0 and 1.
4876 if (vsi->tc_config.enabled_tc & BIT(i))
4877 netdev_set_tc_queue(netdev,
4878 vsi->tc_config.tc_info[i].netdev_tc,
4879 vsi->tc_config.tc_info[i].qcount,
4880 vsi->tc_config.tc_info[i].qoffset);
4883 /* Assign UP2TC map for the VSI */
4884 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4885 /* Get the actual TC# for the UP */
4886 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4887 /* Get the mapped netdev TC# for the UP */
4888 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4889 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4894 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4895 * @vsi: the VSI being configured
4896 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4898 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4899 struct i40e_vsi_context *ctxt)
4901 /* copy just the sections touched, not the entire info,
4902 * since not all sections are valid as returned by the update VSI params command
4905 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4906 memcpy(&vsi->info.queue_mapping,
4907 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4908 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4909 sizeof(vsi->info.tc_mapping));
4913 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4914 * @vsi: VSI to be configured
4915 * @enabled_tc: TC bitmap
4917 * This configures a particular VSI for TCs that are mapped to the
4918 * given TC bitmap. It uses default bandwidth share for TCs across
4919 * VSIs to configure TC for a particular VSI.
4922 * It is expected that the VSI queues have been quiesced before calling this function.
4925 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4927 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4928 struct i40e_vsi_context ctxt;
4932 /* Check if enabled_tc is same as existing or new TCs */
4933 if (vsi->tc_config.enabled_tc == enabled_tc)
4936 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4937 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4938 if (enabled_tc & BIT(i))
4942 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4944 dev_info(&vsi->back->pdev->dev,
4945 "Failed configuring TC map %d for VSI %d\n",
4946 enabled_tc, vsi->seid);
4950 /* Update Queue Pairs Mapping for currently enabled UPs */
4951 ctxt.seid = vsi->seid;
4952 ctxt.pf_num = vsi->back->hw.pf_id;
4954 ctxt.uplink_seid = vsi->uplink_seid;
4955 ctxt.info = vsi->info;
4956 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4958 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4959 ctxt.info.valid_sections |=
4960 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4961 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4964 /* Update the VSI after updating the VSI queue-mapping information */
4965 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4967 dev_info(&vsi->back->pdev->dev,
4968 "Update vsi tc config failed, err %s aq_err %s\n",
4969 i40e_stat_str(&vsi->back->hw, ret),
4970 i40e_aq_str(&vsi->back->hw,
4971 vsi->back->hw.aq.asq_last_status));
4974 /* update the local VSI info with updated queue map */
4975 i40e_vsi_update_queue_map(vsi, &ctxt);
4976 vsi->info.valid_sections = 0;
4978 /* Update current VSI BW information */
4979 ret = i40e_vsi_get_bw_info(vsi);
4981 dev_info(&vsi->back->pdev->dev,
4982 "Failed updating vsi bw info, err %s aq_err %s\n",
4983 i40e_stat_str(&vsi->back->hw, ret),
4984 i40e_aq_str(&vsi->back->hw,
4985 vsi->back->hw.aq.asq_last_status));
4989 /* Update the netdev TC setup */
4990 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4996 * i40e_veb_config_tc - Configure TCs for given VEB
4998 * @enabled_tc: TC bitmap
5000 * Configures given TC bitmap for VEB (switching) element
5002 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
5004 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
5005 struct i40e_pf *pf = veb->pf;
5009 /* No TCs or already enabled TCs just return */
5010 if (!enabled_tc || veb->enabled_tc == enabled_tc)
5013 bw_data.tc_valid_bits = enabled_tc;
5014 /* bw_data.absolute_credits is not set (relative) */
5016 /* Enable ETS TCs with equal BW Share for now */
5017 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5018 if (enabled_tc & BIT(i))
5019 bw_data.tc_bw_share_credits[i] = 1;
5022 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
5025 dev_info(&pf->pdev->dev,
5026 "VEB bw config failed, err %s aq_err %s\n",
5027 i40e_stat_str(&pf->hw, ret),
5028 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5032 /* Update the BW information */
5033 ret = i40e_veb_get_bw_info(veb);
5035 dev_info(&pf->pdev->dev,
5036 "Failed getting veb bw config, err %s aq_err %s\n",
5037 i40e_stat_str(&pf->hw, ret),
5038 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5045 #ifdef CONFIG_I40E_DCB
5047 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
5050 * Reconfigure VEB/VSIs on a given PF; it is assumed that
5051 * the caller has already quiesced all the VSIs before calling this function.
5054 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5060 /* Enable the TCs available on PF to all VEBs */
5061 tc_map = i40e_pf_get_tc_map(pf);
5062 for (v = 0; v < I40E_MAX_VEB; v++) {
5065 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5067 dev_info(&pf->pdev->dev,
5068 "Failed configuring TC for VEB seid=%d\n",
5070 /* Will try to configure as many components as possible */
5074 /* Update each VSI */
5075 for (v = 0; v < pf->num_alloc_vsi; v++) {
5079 /* - Enable all TCs for the LAN VSI
5080 * - For all others keep them at TC0 for now
5082 if (v == pf->lan_vsi)
5083 tc_map = i40e_pf_get_tc_map(pf);
5085 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5087 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5089 dev_info(&pf->pdev->dev,
5090 "Failed configuring TC for VSI seid=%d\n",
5092 /* Will try to configure as many components as possible */
5094 /* Re-configure VSI vectors based on updated TC map */
5095 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5096 if (pf->vsi[v]->netdev)
5097 i40e_dcbnl_set_all(pf->vsi[v]);
5103 * i40e_resume_port_tx - Resume port Tx
5106 * Resume a port's Tx and issue a PF reset in case of failure to resume.
5109 static int i40e_resume_port_tx(struct i40e_pf *pf)
5111 struct i40e_hw *hw = &pf->hw;
5114 ret = i40e_aq_resume_port_tx(hw, NULL);
5116 dev_info(&pf->pdev->dev,
5117 "Resume Port Tx failed, err %s aq_err %s\n",
5118 i40e_stat_str(&pf->hw, ret),
5119 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5120 /* Schedule PF reset to recover */
5121 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5122 i40e_service_event_schedule(pf);
5129 * i40e_init_pf_dcb - Initialize DCB configuration
5130 * @pf: PF being configured
5132 * Query the current DCB configuration and cache it
5133 * in the hardware structure
5135 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5137 struct i40e_hw *hw = &pf->hw;
5140 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5141 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5144 /* Get the initial DCB configuration */
5145 err = i40e_init_dcb(hw);
5147 /* Device/Function is not DCBX capable */
5148 if ((!hw->func_caps.dcb) ||
5149 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5150 dev_info(&pf->pdev->dev,
5151 "DCBX offload is not supported or is disabled for this PF.\n");
5153 /* When the status is not DISABLED, DCBX is managed in FW */
5154 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5155 DCB_CAP_DCBX_VER_IEEE;
5157 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5158 /* Enable DCB tagging only when more than one TC is present,
5159 * otherwise explicitly disable it
5161 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5162 pf->flags |= I40E_FLAG_DCB_ENABLED;
5164 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5165 dev_dbg(&pf->pdev->dev,
5166 "DCBX offload is supported for this PF.\n");
5169 dev_info(&pf->pdev->dev,
5170 "Query for DCB configuration failed, err %s aq_err %s\n",
5171 i40e_stat_str(&pf->hw, err),
5172 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5178 #endif /* CONFIG_I40E_DCB */
5179 #define SPEED_SIZE 14
5182 * i40e_print_link_message - print link up or down
5183 * @vsi: the VSI for which link needs a message
5185 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5187 enum i40e_aq_link_speed new_speed;
5188 char *speed = "Unknown";
5189 char *fc = "Unknown";
5193 new_speed = vsi->back->hw.phy.link_info.link_speed;
5195 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
5197 vsi->current_isup = isup;
5198 vsi->current_speed = new_speed;
5200 netdev_info(vsi->netdev, "NIC Link is Down\n");
5204 /* Warn user if link speed on NPAR enabled partition is not at least 10Gbps */
5207 if (vsi->back->hw.func_caps.npar_enable &&
5208 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5209 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5210 netdev_warn(vsi->netdev,
5211 "The partition detected link speed that is less than 10Gbps\n");
5213 switch (vsi->back->hw.phy.link_info.link_speed) {
5214 case I40E_LINK_SPEED_40GB:
5217 case I40E_LINK_SPEED_20GB:
5220 case I40E_LINK_SPEED_25GB:
5223 case I40E_LINK_SPEED_10GB:
5226 case I40E_LINK_SPEED_1GB:
5229 case I40E_LINK_SPEED_100MB:
5236 switch (vsi->back->hw.fc.current_mode) {
5240 case I40E_FC_TX_PAUSE:
5243 case I40E_FC_RX_PAUSE:
5251 if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
5252 fec = ", FEC: None";
5253 an = ", Autoneg: False";
5255 if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
5256 an = ", Autoneg: True";
5258 if (vsi->back->hw.phy.link_info.fec_info &
5259 I40E_AQ_CONFIG_FEC_KR_ENA)
5260 fec = ", FEC: CL74 FC-FEC/BASE-R";
5261 else if (vsi->back->hw.phy.link_info.fec_info &
5262 I40E_AQ_CONFIG_FEC_RS_ENA)
5263 fec = ", FEC: CL108 RS-FEC";
5266 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
5267 speed, fec, an, fc);
5271 * i40e_up_complete - Finish the last steps of bringing up a connection
5272 * @vsi: the VSI being configured
5274 static int i40e_up_complete(struct i40e_vsi *vsi)
5276 struct i40e_pf *pf = vsi->back;
5279 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5280 i40e_vsi_configure_msix(vsi);
5282 i40e_configure_msi_and_legacy(vsi);
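/* the VSI's interrupts are now configured for either MSI-X or MSI/legacy operation */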
5285 err = i40e_vsi_start_rings(vsi);
5289 clear_bit(__I40E_DOWN, &vsi->state);
5290 i40e_napi_enable_all(vsi);
5291 i40e_vsi_enable_irq(vsi);
5293 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5295 i40e_print_link_message(vsi, true);
5296 netif_tx_start_all_queues(vsi->netdev);
5297 netif_carrier_on(vsi->netdev);
5298 } else if (vsi->netdev) {
5299 i40e_print_link_message(vsi, false);
5300 /* need to check for qualified module here */
5301 if ((pf->hw.phy.link_info.link_info &
5302 I40E_AQ_MEDIA_AVAILABLE) &&
5303 (!(pf->hw.phy.link_info.an_info &
5304 I40E_AQ_QUALIFIED_MODULE)))
5305 netdev_err(vsi->netdev,
5306 "the driver failed to link because an unqualified module was detected.");
5309 /* replay FDIR SB filters */
5310 if (vsi->type == I40E_VSI_FDIR) {
5311 /* reset fd counters */
5314 i40e_fdir_filter_restore(vsi);
5317 /* On the next run of the service_task, notify any clients of the new
5320 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5321 i40e_service_event_schedule(pf);
5327 * i40e_vsi_reinit_locked - Reset the VSI
5328 * @vsi: the VSI being configured
5330 * Rebuild the ring structs after some configuration
5331 * has changed, e.g. MTU size.
5333 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5335 struct i40e_pf *pf = vsi->back;
5337 WARN_ON(in_interrupt());
5338 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5339 usleep_range(1000, 2000);
5343 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5347 * i40e_up - Bring the connection back up after being down
5348 * @vsi: the VSI being configured
5350 int i40e_up(struct i40e_vsi *vsi)
5354 err = i40e_vsi_configure(vsi);
5356 err = i40e_up_complete(vsi);
5362 * i40e_down - Shutdown the connection processing
5363 * @vsi: the VSI being stopped
5365 void i40e_down(struct i40e_vsi *vsi)
5369 /* It is assumed that the caller of this function
5370 * sets the vsi->state __I40E_DOWN bit.
5373 netif_carrier_off(vsi->netdev);
5374 netif_tx_disable(vsi->netdev);
5376 i40e_vsi_disable_irq(vsi);
5377 i40e_vsi_stop_rings(vsi);
5378 i40e_napi_disable_all(vsi);
5380 for (i = 0; i < vsi->num_queue_pairs; i++) {
5381 i40e_clean_tx_ring(vsi->tx_rings[i]);
5382 i40e_clean_rx_ring(vsi->rx_rings[i]);
5388 * i40e_setup_tc - configure multiple traffic classes
5389 * @netdev: net device to configure
5390 * @tc: number of traffic classes to enable
5392 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5394 struct i40e_netdev_priv *np = netdev_priv(netdev);
5395 struct i40e_vsi *vsi = np->vsi;
5396 struct i40e_pf *pf = vsi->back;
5401 /* Check if DCB is enabled before continuing */
5402 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5403 netdev_info(netdev, "DCB is not enabled for adapter\n");
5407 /* Check if MFP enabled */
5408 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5409 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5413 /* Check whether tc count is within enabled limit */
5414 if (tc > i40e_pf_get_num_tc(pf)) {
5415 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5419 /* Generate TC map for number of tc requested */
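/* e.g. tc = 3 requests TC0-TC2, i.e. enabled_tc = 0x7 */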
5420 for (i = 0; i < tc; i++)
5421 enabled_tc |= BIT(i);
5423 /* Requesting same TC configuration as already enabled */
5424 if (enabled_tc == vsi->tc_config.enabled_tc)
5427 /* Quiesce VSI queues */
5428 i40e_quiesce_vsi(vsi);
5430 /* Configure VSI for enabled TCs */
5431 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5433 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5439 i40e_unquiesce_vsi(vsi);
5445 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5446 struct tc_to_netdev *tc)
5448 if (tc->type != TC_SETUP_MQPRIO)
5451 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
5453 return i40e_setup_tc(netdev, tc->mqprio->num_tc);
5457 * i40e_open - Called when a network interface is made active
5458 * @netdev: network interface device structure
5460 * The open entry point is called when a network interface is made
5461 * active by the system (IFF_UP). At this point all resources needed
5462 * for transmit and receive operations are allocated, the interrupt
5463 * handler is registered with the OS, the netdev watchdog subtask is
5464 * enabled, and the stack is notified that the interface is ready.
5466 * Returns 0 on success, negative value on failure
5468 int i40e_open(struct net_device *netdev)
5470 struct i40e_netdev_priv *np = netdev_priv(netdev);
5471 struct i40e_vsi *vsi = np->vsi;
5472 struct i40e_pf *pf = vsi->back;
5475 /* disallow open during test or if eeprom is broken */
5476 if (test_bit(__I40E_TESTING, &pf->state) ||
5477 test_bit(__I40E_BAD_EEPROM, &pf->state))
5480 netif_carrier_off(netdev);
5482 err = i40e_vsi_open(vsi);
5486 /* configure global TSO hardware offload settings */
5487 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5488 TCP_FLAG_FIN) >> 16);
5489 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5491 TCP_FLAG_CWR) >> 16);
5492 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5494 udp_tunnel_get_rx_info(netdev);
5501 * @vsi: the VSI to open
5503 * Finish initialization of the VSI.
5505 * Returns 0 on success, negative value on failure
5507 * Note: expects to be called while under rtnl_lock()
5509 int i40e_vsi_open(struct i40e_vsi *vsi)
5511 struct i40e_pf *pf = vsi->back;
5512 char int_name[I40E_INT_NAME_STR_LEN];
5515 /* allocate descriptors */
5516 err = i40e_vsi_setup_tx_resources(vsi);
5519 err = i40e_vsi_setup_rx_resources(vsi);
5523 err = i40e_vsi_configure(vsi);
5528 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5529 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
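/* this typically produces an interrupt name such as "i40e-eth0" */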
5530 err = i40e_vsi_request_irq(vsi, int_name);
5534 /* Notify the stack of the actual queue counts. */
5535 err = netif_set_real_num_tx_queues(vsi->netdev,
5536 vsi->num_queue_pairs);
5538 goto err_set_queues;
5540 err = netif_set_real_num_rx_queues(vsi->netdev,
5541 vsi->num_queue_pairs);
5543 goto err_set_queues;
5545 } else if (vsi->type == I40E_VSI_FDIR) {
5546 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5547 dev_driver_string(&pf->pdev->dev),
5548 dev_name(&pf->pdev->dev));
5549 err = i40e_vsi_request_irq(vsi, int_name);
5556 err = i40e_up_complete(vsi);
5558 goto err_up_complete;
5565 i40e_vsi_free_irq(vsi);
5567 i40e_vsi_free_rx_resources(vsi);
5569 i40e_vsi_free_tx_resources(vsi);
5570 if (vsi == pf->vsi[pf->lan_vsi])
5571 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
5577 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5578 * @pf: Pointer to PF
5580 * This function destroys the hlist where all the Flow Director
5581 * filters were saved.
5583 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5585 struct i40e_fdir_filter *filter;
5586 struct i40e_flex_pit *pit_entry, *tmp;
5587 struct hlist_node *node2;
5589 hlist_for_each_entry_safe(filter, node2,
5590 &pf->fdir_filter_list, fdir_node) {
5591 hlist_del(&filter->fdir_node);
5595 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
5596 list_del(&pit_entry->list);
5599 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
5601 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
5602 list_del(&pit_entry->list);
5605 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
5607 pf->fdir_pf_active_filters = 0;
5608 pf->fd_tcp4_filter_cnt = 0;
5609 pf->fd_udp4_filter_cnt = 0;
5610 pf->fd_sctp4_filter_cnt = 0;
5611 pf->fd_ip4_filter_cnt = 0;
5613 /* Reprogram the default input set for TCP/IPv4 */
5614 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5615 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5616 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5618 /* Reprogram the default input set for UDP/IPv4 */
5619 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5620 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5621 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5623 /* Reprogram the default input set for SCTP/IPv4 */
5624 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5625 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5626 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5628 /* Reprogram the default input set for Other/IPv4 */
5629 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5630 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5634 * i40e_close - Disables a network interface
5635 * @netdev: network interface device structure
5637 * The close entry point is called when an interface is de-activated
5638 * by the OS. The hardware is still under the driver's control, but
5639 * this netdev interface is disabled.
5641 * Returns 0, this is not allowed to fail
5643 int i40e_close(struct net_device *netdev)
5645 struct i40e_netdev_priv *np = netdev_priv(netdev);
5646 struct i40e_vsi *vsi = np->vsi;
5648 i40e_vsi_close(vsi);
5654 * i40e_do_reset - Start a PF or Core Reset sequence
5655 * @pf: board private structure
5656 * @reset_flags: which reset is requested
5657 * @lock_acquired: indicates whether or not the lock has been acquired
5658 * before this function was called.
5660 * The essential difference in resets is that the PF Reset
5661 * doesn't clear the packet buffers, doesn't reset the PE
5662 * firmware, and doesn't bother the other PFs on the chip.
5664 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
5668 WARN_ON(in_interrupt());
5671 /* do the biggest reset indicated */
5672 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5674 /* Request a Global Reset
5676 * This will start the chip's countdown to the actual full
5677 * chip reset event and cause a warning interrupt to be sent
5678 * to all PFs, including the requestor. Our handler
5679 * for the warning interrupt will deal with the shutdown
5680 * and recovery of the switch setup.
5682 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5683 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5684 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5685 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5687 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5689 /* Request a Core Reset
5691 * Same as Global Reset, except does *not* include the MAC/PHY
5693 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5694 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5695 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5696 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5697 i40e_flush(&pf->hw);
5699 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5701 /* Request a PF Reset
5703 * Resets only the PF-specific registers
5705 * This goes directly to the tear-down and rebuild of
5706 * the switch, since we need to do all the recovery as
5707 * for the Core Reset.
5709 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5710 i40e_handle_reset_warning(pf, lock_acquired);
5712 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5715 /* Find the VSI(s) that requested a re-init */
5716 dev_info(&pf->pdev->dev,
5717 "VSI reinit requested\n");
5718 for (v = 0; v < pf->num_alloc_vsi; v++) {
5719 struct i40e_vsi *vsi = pf->vsi[v];
5722 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5723 i40e_vsi_reinit_locked(pf->vsi[v]);
5724 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5727 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5730 /* Find the VSI(s) that needs to be brought down */
5731 dev_info(&pf->pdev->dev, "VSI down requested\n");
5732 for (v = 0; v < pf->num_alloc_vsi; v++) {
5733 struct i40e_vsi *vsi = pf->vsi[v];
5736 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5737 set_bit(__I40E_DOWN, &vsi->state);
5739 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5743 dev_info(&pf->pdev->dev,
5744 "bad reset request 0x%08x\n", reset_flags);
5748 #ifdef CONFIG_I40E_DCB
5750 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5751 * @pf: board private structure
5752 * @old_cfg: current DCB config
5753 * @new_cfg: new DCB config
5755 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5756 struct i40e_dcbx_config *old_cfg,
5757 struct i40e_dcbx_config *new_cfg)
5759 bool need_reconfig = false;
5761 /* Check if ETS configuration has changed */
5762 if (memcmp(&new_cfg->etscfg,
5764 sizeof(new_cfg->etscfg))) {
5765 /* If Priority Table has changed reconfig is needed */
5766 if (memcmp(&new_cfg->etscfg.prioritytable,
5767 &old_cfg->etscfg.prioritytable,
5768 sizeof(new_cfg->etscfg.prioritytable))) {
5769 need_reconfig = true;
5770 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5773 if (memcmp(&new_cfg->etscfg.tcbwtable,
5774 &old_cfg->etscfg.tcbwtable,
5775 sizeof(new_cfg->etscfg.tcbwtable)))
5776 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5778 if (memcmp(&new_cfg->etscfg.tsatable,
5779 &old_cfg->etscfg.tsatable,
5780 sizeof(new_cfg->etscfg.tsatable)))
5781 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5784 /* Check if PFC configuration has changed */
5785 if (memcmp(&new_cfg->pfc,
5787 sizeof(new_cfg->pfc))) {
5788 need_reconfig = true;
5789 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5792 /* Check if APP Table has changed */
5793 if (memcmp(&new_cfg->app,
5795 sizeof(new_cfg->app))) {
5796 need_reconfig = true;
5797 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5800 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5801 return need_reconfig;
5805 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5806 * @pf: board private structure
5807 * @e: event info posted on ARQ
5809 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5810 struct i40e_arq_event_info *e)
5812 struct i40e_aqc_lldp_get_mib *mib =
5813 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5814 struct i40e_hw *hw = &pf->hw;
5815 struct i40e_dcbx_config tmp_dcbx_cfg;
5816 bool need_reconfig = false;
5820 /* Not DCB capable or capability disabled */
5821 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5824 /* Ignore if event is not for Nearest Bridge */
5825 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5826 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5827 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5828 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5831 /* Check MIB Type and return if event for Remote MIB update */
5832 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5833 dev_dbg(&pf->pdev->dev,
5834 "LLDP event mib type %s\n", type ? "remote" : "local");
5835 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5836 /* Update the remote cached instance and return */
5837 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5838 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5839 &hw->remote_dcbx_config);
5843 /* Store the old configuration */
5844 tmp_dcbx_cfg = hw->local_dcbx_config;
5846 /* Reset the old DCBx configuration data */
5847 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5848 /* Get updated DCBX data from firmware */
5849 ret = i40e_get_dcb_config(&pf->hw);
5851 dev_info(&pf->pdev->dev,
5852 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5853 i40e_stat_str(&pf->hw, ret),
5854 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5858 /* No change detected in DCBX configs */
5859 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5860 sizeof(tmp_dcbx_cfg))) {
5861 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5865 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5866 &hw->local_dcbx_config);
5868 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5873 /* Enable DCB tagging only when more than one TC */
5874 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5875 pf->flags |= I40E_FLAG_DCB_ENABLED;
5877 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5879 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5880 /* Reconfiguration needed, quiesce all VSIs */
5881 i40e_pf_quiesce_all_vsi(pf);
5883 /* Changes in configuration update VEB/VSI */
5884 i40e_dcb_reconfigure(pf);
5886 ret = i40e_resume_port_tx(pf);
5888 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5889 /* In case of error no point in resuming VSIs */
5893 /* Wait for the PF's queues to be disabled */
5894 ret = i40e_pf_wait_queues_disabled(pf);
5896 /* Schedule PF reset to recover */
5897 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5898 i40e_service_event_schedule(pf);
5900 i40e_pf_unquiesce_all_vsi(pf);
5901 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
5902 I40E_FLAG_CLIENT_L2_CHANGE);
5908 #endif /* CONFIG_I40E_DCB */
5911 * i40e_do_reset_safe - Protected reset path for userland calls.
5912 * @pf: board private structure
5913 * @reset_flags: which reset is requested
5916 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5919 i40e_do_reset(pf, reset_flags, true);
5924 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5925 * @pf: board private structure
5926 * @e: event info posted on ARQ
5928 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5931 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5932 struct i40e_arq_event_info *e)
5934 struct i40e_aqc_lan_overflow *data =
5935 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5936 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5937 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5938 struct i40e_hw *hw = &pf->hw;
5942 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5945 /* Queue belongs to VF, find the VF and issue VF reset */
5946 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5947 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5948 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5949 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5950 vf_id -= hw->func_caps.vf_base_id;
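/* translate the absolute VF id into this PF's VF array index */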
5951 vf = &pf->vf[vf_id];
5952 i40e_vc_notify_vf_reset(vf);
5953 /* Allow VF to process pending reset notification */
5955 i40e_reset_vf(vf, false);
5960 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5961 * @pf: board private structure
5963 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5967 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5968 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5973 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5974 * @pf: board private structure
5976 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5980 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
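/* total programmed filters = guaranteed count + best-effort count */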
5981 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5982 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5983 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5988 * i40e_get_global_fd_count - Get total FD filters programmed on device
5989 * @pf: board private structure
5991 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5995 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5996 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5997 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5998 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6003 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if disabled
6004 * @pf: board private structure
6006 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6008 struct i40e_fdir_filter *filter;
6009 u32 fcnt_prog, fcnt_avail;
6010 struct hlist_node *node;
6012 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6015 /* Check if FD SB or ATR was auto-disabled and if there is enough room to re-enable them */
6018 fcnt_prog = i40e_get_global_fd_count(pf);
6019 fcnt_avail = pf->fdir_pf_filter_count;
6020 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
6021 (pf->fd_add_err == 0) ||
6022 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6023 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6024 (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
6025 pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
6026 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6027 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6031 /* Wait for some more space to be available to turn on ATR. We also
6032 * must check that no existing ntuple rules for TCP are in effect
6034 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
6035 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6036 (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
6037 (pf->fd_tcp4_filter_cnt == 0)) {
6038 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6039 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6040 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6044 /* if hw had a problem adding a filter, delete it */
6045 if (pf->fd_inv > 0) {
6046 hlist_for_each_entry_safe(filter, node,
6047 &pf->fdir_filter_list, fdir_node) {
6048 if (filter->fd_id == pf->fd_inv) {
6049 hlist_del(&filter->fdir_node);
6051 pf->fdir_pf_active_filters--;
6057 #define I40E_MIN_FD_FLUSH_INTERVAL 10
6058 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
6060 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
6061 * @pf: board private structure
6063 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6065 unsigned long min_flush_time;
6066 int flush_wait_retry = 50;
6067 bool disable_atr = false;
6071 if (!time_after(jiffies, pf->fd_flush_timestamp +
6072 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
6075 /* If the flush is happening too quickly and we have mostly SB rules, we
6076 * should not re-enable ATR for some time.
6078 min_flush_time = pf->fd_flush_timestamp +
6079 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
6080 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
6082 if (!(time_after(jiffies, min_flush_time)) &&
6083 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
6084 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6085 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6089 pf->fd_flush_timestamp = jiffies;
6090 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
6091 /* flush all filters */
6092 wr32(&pf->hw, I40E_PFQF_CTL_1,
6093 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6094 i40e_flush(&pf->hw);
6098 /* Check FD flush status every 5-6msec */
6099 usleep_range(5000, 6000);
6100 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6101 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6103 } while (flush_wait_retry--);
6104 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6105 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6107 /* replay sideband filters */
6108 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6109 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
6110 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6111 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6112 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6113 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6118 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6119 * @pf: board private structure
6121 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6123 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6126 /* We can see up to 256 filter programming descriptors in transit if the
6127 * filters are being applied very quickly, before we see the first
6128 * filter miss error on Rx queue 0. Accumulating enough error messages before
6129 * reacting makes sure we don't trigger a flush too often.
6131 #define I40E_MAX_FD_PROGRAM_ERROR 256
6134 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6135 * @pf: board private structure
6137 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6140 /* if interface is down do nothing */
6141 if (test_bit(__I40E_DOWN, &pf->state))
6144 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6145 i40e_fdir_flush_and_replay(pf);
6147 i40e_fdir_check_and_reenable(pf);
6152 * i40e_vsi_link_event - notify VSI of a link event
6153 * @vsi: vsi to be notified
6154 * @link_up: link up or down
6156 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6158 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6161 switch (vsi->type) {
6163 if (!vsi->netdev || !vsi->netdev_registered)
6167 netif_carrier_on(vsi->netdev);
6168 netif_tx_wake_all_queues(vsi->netdev);
6170 netif_carrier_off(vsi->netdev);
6171 netif_tx_stop_all_queues(vsi->netdev);
6175 case I40E_VSI_SRIOV:
6176 case I40E_VSI_VMDQ2:
6178 case I40E_VSI_IWARP:
6179 case I40E_VSI_MIRROR:
6181 /* there is no notification for other VSIs */
6187 * i40e_veb_link_event - notify elements on the veb of a link event
6188 * @veb: veb to be notified
6189 * @link_up: link up or down
6191 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6196 if (!veb || !veb->pf)
6200 /* depth first... */
6201 for (i = 0; i < I40E_MAX_VEB; i++)
6202 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6203 i40e_veb_link_event(pf->veb[i], link_up);
6205 /* ... now the local VSIs */
6206 for (i = 0; i < pf->num_alloc_vsi; i++)
6207 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6208 i40e_vsi_link_event(pf->vsi[i], link_up);
6212 * i40e_link_event - Update netif_carrier status
6213 * @pf: board private structure
6215 static void i40e_link_event(struct i40e_pf *pf)
6217 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6218 u8 new_link_speed, old_link_speed;
6220 bool new_link, old_link;
6222 /* save off old link status information */
6223 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6225 /* set this to force the get_link_status call to refresh state */
6226 pf->hw.phy.get_link_info = true;
6228 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6230 status = i40e_get_link_status(&pf->hw, &new_link);
6232 /* On success, disable temp link polling */
6233 if (status == I40E_SUCCESS) {
6234 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
6235 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
6237 /* Enable link polling temporarily until i40e_get_link_status
6238 * returns I40E_SUCCESS
6240 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
6241 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6246 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6247 new_link_speed = pf->hw.phy.link_info.link_speed;
6249 if (new_link == old_link &&
6250 new_link_speed == old_link_speed &&
6251 (test_bit(__I40E_DOWN, &vsi->state) ||
6252 new_link == netif_carrier_ok(vsi->netdev)))
6255 if (!test_bit(__I40E_DOWN, &vsi->state))
6256 i40e_print_link_message(vsi, new_link);
6258 /* Notify the base of the switch tree connected to
6259 * the link. Floating VEBs are not notified.
6261 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6262 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6264 i40e_vsi_link_event(vsi, new_link);
6267 i40e_vc_notify_link_state(pf);
6269 if (pf->flags & I40E_FLAG_PTP)
6270 i40e_ptp_set_increment(pf);
6274 * i40e_watchdog_subtask - periodic checks not using event driven response
6275 * @pf: board private structure
6277 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6281 /* if interface is down do nothing */
6282 if (test_bit(__I40E_DOWN, &pf->state) ||
6283 test_bit(__I40E_CONFIG_BUSY, &pf->state))
6286 /* make sure we don't do these things too often */
6287 if (time_before(jiffies, (pf->service_timer_previous +
6288 pf->service_timer_period)))
6290 pf->service_timer_previous = jiffies;
6292 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
6293 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
6294 i40e_link_event(pf);
6296 /* Update the stats for active netdevs so the network stack
6297 * can look at updated numbers whenever it cares to
6299 for (i = 0; i < pf->num_alloc_vsi; i++)
6300 if (pf->vsi[i] && pf->vsi[i]->netdev)
6301 i40e_update_stats(pf->vsi[i]);
6303 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6304 /* Update the stats for the active switching components */
6305 for (i = 0; i < I40E_MAX_VEB; i++)
6307 i40e_update_veb_stats(pf->veb[i]);
6310 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6314 * i40e_reset_subtask - Set up for resetting the device and driver
6315 * @pf: board private structure
6317 static void i40e_reset_subtask(struct i40e_pf *pf)
6319 u32 reset_flags = 0;
6321 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6322 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6323 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6325 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6326 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6327 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6329 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6330 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6331 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6333 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6334 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6335 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6337 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6338 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6339 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6342 /* If there's a recovery already waiting, it takes
6343 * precedence before starting a new reset sequence.
6345 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6346 i40e_prep_for_reset(pf, false);
6347 i40e_reset(pf);
6348 i40e_rebuild(pf, false, false);
6351 /* If we're already down or resetting, just bail */
6352 if (reset_flags &&
6353 !test_bit(__I40E_DOWN, &pf->state) &&
6354 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) {
6356 i40e_do_reset(pf, reset_flags, true);
6362 * i40e_handle_link_event - Handle link event
6363 * @pf: board private structure
6364 * @e: event info posted on ARQ
6366 static void i40e_handle_link_event(struct i40e_pf *pf,
6367 struct i40e_arq_event_info *e)
6369 struct i40e_aqc_get_link_status *status =
6370 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6372 /* Do a new status request to re-enable LSE reporting
6373 * and load new status information into the hw struct.
6374 * This completely ignores any state information
6375 * in the ARQ event info, instead choosing to always
6376 * issue the AQ update link status command.
6378 i40e_link_event(pf);
6380 /* check for unqualified module, if link is down */
6381 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6382 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6383 (!(status->link_info & I40E_AQ_LINK_UP)))
6384 dev_err(&pf->pdev->dev,
6385 "The driver failed to link because an unqualified module was detected.\n");
6389 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6390 * @pf: board private structure
6392 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6394 struct i40e_arq_event_info event;
6395 struct i40e_hw *hw = &pf->hw;
6402 /* Do not run clean AQ when PF reset fails */
6403 if (test_bit(__I40E_RESET_FAILED, &pf->state))
6406 /* check for error indications */
6407 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6409 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6410 if (hw->debug_mask & I40E_DEBUG_AQ)
6411 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6412 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6414 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6415 if (hw->debug_mask & I40E_DEBUG_AQ)
6416 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6417 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6418 pf->arq_overflows++;
6420 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6421 if (hw->debug_mask & I40E_DEBUG_AQ)
6422 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6423 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6426 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6428 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6430 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6431 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6432 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6433 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6435 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6436 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6437 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6438 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6440 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6441 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6442 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6443 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6446 wr32(&pf->hw, pf->hw.aq.asq.len, val);
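/* Now drain the receive side of the admin queue: pull events one at a
 * time (up to adminq_work_limit per pass) and dispatch them by opcode.
 */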
6448 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6449 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6454 ret = i40e_clean_arq_element(hw, &event, &pending);
6455 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6458 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6462 opcode = le16_to_cpu(event.desc.opcode);
6465 case i40e_aqc_opc_get_link_status:
6466 i40e_handle_link_event(pf, &event);
6468 case i40e_aqc_opc_send_msg_to_pf:
6469 ret = i40e_vc_process_vf_msg(pf,
6470 le16_to_cpu(event.desc.retval),
6471 le32_to_cpu(event.desc.cookie_high),
6472 le32_to_cpu(event.desc.cookie_low),
6476 case i40e_aqc_opc_lldp_update_mib:
6477 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6478 #ifdef CONFIG_I40E_DCB
6480 ret = i40e_handle_lldp_event(pf, &event);
6482 #endif /* CONFIG_I40E_DCB */
6484 case i40e_aqc_opc_event_lan_overflow:
6485 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6486 i40e_handle_lan_overflow_event(pf, &event);
6488 case i40e_aqc_opc_send_msg_to_peer:
6489 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6491 case i40e_aqc_opc_nvm_erase:
6492 case i40e_aqc_opc_nvm_update:
6493 case i40e_aqc_opc_oem_post_update:
6494 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6495 "ARQ NVM operation 0x%04x completed\n",
6499 dev_info(&pf->pdev->dev,
6500 "ARQ: Unknown event 0x%04x ignored\n",
6504 } while (i++ < pf->adminq_work_limit);
6506 if (i < pf->adminq_work_limit)
6507 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6509 /* re-enable Admin queue interrupt cause */
6510 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6511 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6512 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6515 kfree(event.msg_buf);
6519 * i40e_verify_eeprom - make sure eeprom is good to use
6520 * @pf: board private structure
6522 static void i40e_verify_eeprom(struct i40e_pf *pf)
6526 err = i40e_diag_eeprom_test(&pf->hw);
6528 /* retry in case of garbage read */
6529 err = i40e_diag_eeprom_test(&pf->hw);
6531 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6533 set_bit(__I40E_BAD_EEPROM, &pf->state);
6537 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6538 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6539 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6544 * i40e_enable_pf_switch_lb
6545 * @pf: pointer to the PF structure
6547 * enable switch loop back or die - no point in a return value
6549 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6551 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6552 struct i40e_vsi_context ctxt;
6555 ctxt.seid = pf->main_vsi_seid;
6556 ctxt.pf_num = pf->hw.pf_id;
6558 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6560 dev_info(&pf->pdev->dev,
6561 "couldn't get PF vsi config, err %s aq_err %s\n",
6562 i40e_stat_str(&pf->hw, ret),
6563 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
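/* Only the switch section of the VSI context is updated here: mark it
 * valid and set ALLOW_LB so the PF's switch element may loop traffic
 * back locally (used when the bridge is in VEB mode, see below).
 */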
6566 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6567 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6568 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6570 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6572 dev_info(&pf->pdev->dev,
6573 "update vsi switch failed, err %s aq_err %s\n",
6574 i40e_stat_str(&pf->hw, ret),
6575 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6580 * i40e_disable_pf_switch_lb
6581 * @pf: pointer to the PF structure
6583 * disable switch loop back or die - no point in a return value
6585 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6587 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6588 struct i40e_vsi_context ctxt;
6591 ctxt.seid = pf->main_vsi_seid;
6592 ctxt.pf_num = pf->hw.pf_id;
6594 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6596 dev_info(&pf->pdev->dev,
6597 "couldn't get PF vsi config, err %s aq_err %s\n",
6598 i40e_stat_str(&pf->hw, ret),
6599 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6602 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6603 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6604 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6606 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6608 dev_info(&pf->pdev->dev,
6609 "update vsi switch failed, err %s aq_err %s\n",
6610 i40e_stat_str(&pf->hw, ret),
6611 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6616 * i40e_config_bridge_mode - Configure the HW bridge mode
6617 * @veb: pointer to the bridge instance
6619 * Configure the loop back mode for the LAN VSI that is downlink to the
6620 * specified HW bridge instance. It is expected this function is called
6621 * when a new HW bridge is instantiated.
6623 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6625 struct i40e_pf *pf = veb->pf;
6627 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6628 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6629 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
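/* In VEPA mode all frames are handed to the external switch, so the
 * PF switch loopback is disabled; VEB mode switches frames locally
 * and therefore needs loopback enabled.
 */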
6630 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6631 i40e_disable_pf_switch_lb(pf);
6633 i40e_enable_pf_switch_lb(pf);
6637 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6638 * @veb: pointer to the VEB instance
6640 * This is a recursive function that first builds the attached VSIs then
6641 * recurses in to build the next layer of VEB. We track the connections
6642 * through our own index numbers because the seid's from the HW could
6643 * change across the reset.
6645 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6647 struct i40e_vsi *ctl_vsi = NULL;
6648 struct i40e_pf *pf = veb->pf;
6652 /* build VSI that owns this VEB, temporarily attached to base VEB */
6653 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6655 pf->vsi[v]->veb_idx == veb->idx &&
6656 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6657 ctl_vsi = pf->vsi[v];
6662 dev_info(&pf->pdev->dev,
6663 "missing owner VSI for veb_idx %d\n", veb->idx);
6665 goto end_reconstitute;
6667 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6668 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6669 ret = i40e_add_vsi(ctl_vsi);
6671 dev_info(&pf->pdev->dev,
6672 "rebuild of veb_idx %d owner VSI failed: %d\n",
6674 goto end_reconstitute;
6676 i40e_vsi_reset_stats(ctl_vsi);
6678 /* create the VEB in the switch and move the VSI onto the VEB */
6679 ret = i40e_add_veb(veb, ctl_vsi);
6681 goto end_reconstitute;
6683 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6684 veb->bridge_mode = BRIDGE_MODE_VEB;
6686 veb->bridge_mode = BRIDGE_MODE_VEPA;
6687 i40e_config_bridge_mode(veb);
6689 /* create the remaining VSIs attached to this VEB */
6690 for (v = 0; v < pf->num_alloc_vsi; v++) {
6691 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6694 if (pf->vsi[v]->veb_idx == veb->idx) {
6695 struct i40e_vsi *vsi = pf->vsi[v];
6697 vsi->uplink_seid = veb->seid;
6698 ret = i40e_add_vsi(vsi);
6700 dev_info(&pf->pdev->dev,
6701 "rebuild of vsi_idx %d failed: %d\n",
6703 goto end_reconstitute;
6705 i40e_vsi_reset_stats(vsi);
6709 /* create any VEBs attached to this VEB - RECURSION */
6710 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6711 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6712 pf->veb[veb_idx]->uplink_seid = veb->seid;
6713 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6724 * i40e_get_capabilities - get info about the HW
6725 * @pf: the PF struct
6727 static int i40e_get_capabilities(struct i40e_pf *pf)
6729 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6734 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
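/* Start with room for 40 capability records; if the firmware reports
 * ENOMEM, the buffer is resized to the length it asked for and the
 * discovery request is retried.
 */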
6736 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6740 /* this loads the data into the hw struct for us */
6741 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6743 i40e_aqc_opc_list_func_capabilities,
6745 /* data loaded, buffer no longer needed */
6748 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6749 /* retry with a larger buffer */
6750 buf_len = data_size;
6751 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6752 dev_info(&pf->pdev->dev,
6753 "capability discovery failed, err %s aq_err %s\n",
6754 i40e_stat_str(&pf->hw, err),
6755 i40e_aq_str(&pf->hw,
6756 pf->hw.aq.asq_last_status));
6761 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6762 dev_info(&pf->pdev->dev,
6763 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6764 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6765 pf->hw.func_caps.num_msix_vectors,
6766 pf->hw.func_caps.num_msix_vectors_vf,
6767 pf->hw.func_caps.fd_filters_guaranteed,
6768 pf->hw.func_caps.fd_filters_best_effort,
6769 pf->hw.func_caps.num_tx_qp,
6770 pf->hw.func_caps.num_vsis);
6772 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6773 + pf->hw.func_caps.num_vfs)
6774 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6775 dev_info(&pf->pdev->dev,
6776 "got num_vsis %d, setting num_vsis to %d\n",
6777 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6778 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6784 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6787 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6788 * @pf: board private structure
6790 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6792 struct i40e_vsi *vsi;
6794 /* quick workaround for an NVM issue that leaves a critical register
6795 * uninitialized
6796 */
6797 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6798 static const u32 hkey[] = {
6799 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6800 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6801 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6805 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6806 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6809 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6812 /* find existing VSI and see if it needs configuring */
6813 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
6815 /* create a new VSI if none exists */
6817 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6818 pf->vsi[pf->lan_vsi]->seid, 0);
6820 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6821 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6826 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6830 * i40e_fdir_teardown - release the Flow Director resources
6831 * @pf: board private structure
6833 static void i40e_fdir_teardown(struct i40e_pf *pf)
6835 struct i40e_vsi *vsi;
6837 i40e_fdir_filter_exit(pf);
6838 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
6840 i40e_vsi_release(vsi);
6844 * i40e_prep_for_reset - prep for the core to reset
6845 * @pf: board private structure
6846 * @lock_acquired: indicates whether or not the lock has been acquired
6847 * before this function was called.
6849 * Close up the VFs and other things in prep for PF Reset.
6851 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
6853 struct i40e_hw *hw = &pf->hw;
6854 i40e_status ret = 0;
6857 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6858 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6860 if (i40e_check_asq_alive(&pf->hw))
6861 i40e_vc_notify_reset(pf);
6863 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6865 /* quiesce the VSIs and their queues that are not already DOWN */
6866 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
6869 i40e_pf_quiesce_all_vsi(pf);
6873 for (v = 0; v < pf->num_alloc_vsi; v++) {
6875 pf->vsi[v]->seid = 0;
6878 i40e_shutdown_adminq(&pf->hw);
6880 /* call shutdown HMC */
6881 if (hw->hmc.hmc_obj) {
6882 ret = i40e_shutdown_lan_hmc(hw);
6884 dev_warn(&pf->pdev->dev,
6885 "shutdown_lan_hmc failed: %d\n", ret);
6890 * i40e_send_version - update firmware with driver version
6893 static void i40e_send_version(struct i40e_pf *pf)
6895 struct i40e_driver_version dv;
6897 dv.major_version = DRV_VERSION_MAJOR;
6898 dv.minor_version = DRV_VERSION_MINOR;
6899 dv.build_version = DRV_VERSION_BUILD;
6900 dv.subbuild_version = 0;
6901 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6902 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6906 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
6907 * @pf: board private structure
6909 static int i40e_reset(struct i40e_pf *pf)
6911 struct i40e_hw *hw = &pf->hw;
6914 ret = i40e_pf_reset(hw);
6916 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6917 set_bit(__I40E_RESET_FAILED, &pf->state);
6918 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6926 * i40e_rebuild - rebuild using a saved config
6927 * @pf: board private structure
6928 * @reinit: if the Main VSI needs to be re-initialized.
6929 * @lock_acquired: indicates whether or not the lock has been acquired
6930 * before this function was called.
6932 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
6934 struct i40e_hw *hw = &pf->hw;
6935 u8 set_fc_aq_fail = 0;
6940 if (test_bit(__I40E_DOWN, &pf->state))
6941 goto clear_recovery;
6942 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6944 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6945 ret = i40e_init_adminq(&pf->hw);
6947 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6948 i40e_stat_str(&pf->hw, ret),
6949 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6950 goto clear_recovery;
6953 /* re-verify the eeprom if we just had an EMP reset */
6954 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6955 i40e_verify_eeprom(pf);
6957 i40e_clear_pxe_mode(hw);
6958 ret = i40e_get_capabilities(pf);
6960 goto end_core_reset;
6962 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6963 hw->func_caps.num_rx_qp, 0, 0);
6965 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6966 goto end_core_reset;
6968 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6970 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6971 goto end_core_reset;
6974 #ifdef CONFIG_I40E_DCB
6975 ret = i40e_init_pf_dcb(pf);
6977 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6978 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6979 /* Continue without DCB enabled */
6981 #endif /* CONFIG_I40E_DCB */
6982 /* do basic switch setup */
6985 ret = i40e_setup_pf_switch(pf, reinit);
6989 /* The driver only wants link up/down and module qualification
6990 * reports from firmware. Note the negative logic.
6992 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6993 ~(I40E_AQ_EVENT_LINK_UPDOWN |
6994 I40E_AQ_EVENT_MEDIA_NA |
6995 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6997 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6998 i40e_stat_str(&pf->hw, ret),
6999 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7001 /* make sure our flow control settings are restored */
7002 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7004 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7005 i40e_stat_str(&pf->hw, ret),
7006 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7008 /* Rebuild the VSIs and VEBs that existed before reset.
7009 * They are still in our local switch element arrays, so only
7010 * need to rebuild the switch model in the HW.
7012 * If there were VEBs but the reconstitution failed, we'll try
7013 * to recover minimal use by getting the basic PF VSI working.
7015 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
7016 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
7017 /* find the one VEB connected to the MAC, and find orphans */
7018 for (v = 0; v < I40E_MAX_VEB; v++) {
7022 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
7023 pf->veb[v]->uplink_seid == 0) {
7024 ret = i40e_reconstitute_veb(pf->veb[v]);
7029 /* If Main VEB failed, we're in deep doodoo,
7030 * so give up rebuilding the switch and set up
7031 * for minimal rebuild of PF VSI.
7032 * If orphan failed, we'll report the error
7033 * but try to keep going.
7035 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
7036 dev_info(&pf->pdev->dev,
7037 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
7039 pf->vsi[pf->lan_vsi]->uplink_seid
7042 } else if (pf->veb[v]->uplink_seid == 0) {
7043 dev_info(&pf->pdev->dev,
7044 "rebuild of orphan VEB failed: %d\n",
7051 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
7052 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
7053 /* no VEB, so rebuild only the Main VSI */
7054 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
7056 dev_info(&pf->pdev->dev,
7057 "rebuild of Main VSI failed: %d\n", ret);
7062 /* Reconfigure hardware for allowing smaller MSS in the case
7063 * of TSO, so that we avoid the MDD being fired and causing
7064 * a reset in the case of small MSS+TSO.
7066 #define I40E_REG_MSS 0x000E64DC
7067 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
7068 #define I40E_64BYTE_MSS 0x400000
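/* The minimum-MSS field occupies bits 16..25 of I40E_REG_MSS, so
 * 0x400000 (64 << 16) programs a 64 byte minimum segment size.
 */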
7069 val = rd32(hw, I40E_REG_MSS);
7070 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
7071 val &= ~I40E_REG_MSS_MIN_MASK;
7072 val |= I40E_64BYTE_MSS;
7073 wr32(hw, I40E_REG_MSS, val);
7076 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
7078 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
7080 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
7081 i40e_stat_str(&pf->hw, ret),
7082 i40e_aq_str(&pf->hw,
7083 pf->hw.aq.asq_last_status));
7085 /* reinit the misc interrupt */
7086 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7087 ret = i40e_setup_misc_vector(pf);
7089 /* Add a filter to drop all Flow control frames from any VSI from being
7090 * transmitted. By doing so we stop a malicious VF from sending out
7091 * PAUSE or PFC frames and potentially controlling traffic for other
7093 * The FW can still send Flow control frames if enabled.
7095 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
7098 /* restart the VSIs that were rebuilt and running before the reset */
7099 i40e_pf_unquiesce_all_vsi(pf);
7101 if (pf->num_alloc_vfs) {
7102 for (v = 0; v < pf->num_alloc_vfs; v++)
7103 i40e_reset_vf(&pf->vf[v], true);
7106 /* tell the firmware that we're starting */
7107 i40e_send_version(pf);
7113 clear_bit(__I40E_RESET_FAILED, &pf->state);
7115 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
7119 * i40e_reset_and_rebuild - reset and rebuild using a saved config
7120 * @pf: board private structure
7121 * @reinit: if the Main VSI needs to be re-initialized.
7122 * @lock_acquired: indicates whether or not the lock has been acquired
7123 * before this function was called.
7125 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7129 /* Now we wait for GRST to settle out.
7130 * We don't have to delete the VEBs or VSIs from the hw switch
7131 * because the reset will make them disappear.
7133 ret = i40e_reset(pf);
7135 i40e_rebuild(pf, reinit, lock_acquired);
7139 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
7140 * @pf: board private structure
7142 * Close up the VFs and other things in prep for a Core Reset,
7143 * then get ready to rebuild the world.
7144 * @lock_acquired: indicates whether or not the lock has been acquired
7145 * before this function was called.
7147 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
7149 i40e_prep_for_reset(pf, lock_acquired);
7150 i40e_reset_and_rebuild(pf, false, lock_acquired);
7154 * i40e_handle_mdd_event
7155 * @pf: pointer to the PF structure
7157 * Called from the MDD irq handler to identify possibly malicious VFs
7159 static void i40e_handle_mdd_event(struct i40e_pf *pf)
7161 struct i40e_hw *hw = &pf->hw;
7162 bool mdd_detected = false;
7163 bool pf_mdd_detected = false;
7168 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
7171 /* find what triggered the MDD event */
7172 reg = rd32(hw, I40E_GL_MDET_TX);
7173 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7174 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7175 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7176 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7177 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7178 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7179 I40E_GL_MDET_TX_EVENT_SHIFT;
7180 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7181 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7182 pf->hw.func_caps.base_queue;
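/* The queue number reported in the MDET register is absolute, so the
 * PF's base_queue is subtracted to get a function-relative queue index.
 */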
7183 if (netif_msg_tx_err(pf))
7184 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7185 event, queue, pf_num, vf_num);
7186 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7187 mdd_detected = true;
7189 reg = rd32(hw, I40E_GL_MDET_RX);
7190 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7191 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7192 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7193 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7194 I40E_GL_MDET_RX_EVENT_SHIFT;
7195 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7196 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7197 pf->hw.func_caps.base_queue;
7198 if (netif_msg_rx_err(pf))
7199 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7200 event, queue, func);
7201 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7202 mdd_detected = true;
7206 reg = rd32(hw, I40E_PF_MDET_TX);
7207 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7208 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7209 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7210 pf_mdd_detected = true;
7212 reg = rd32(hw, I40E_PF_MDET_RX);
7213 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7214 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7215 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7216 pf_mdd_detected = true;
7218 /* Queue belongs to the PF, initiate a reset */
7219 if (pf_mdd_detected) {
7220 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7221 i40e_service_event_schedule(pf);
7225 /* see if one of the VFs needs its hand slapped */
7226 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7228 reg = rd32(hw, I40E_VP_MDET_TX(i));
7229 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7230 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7231 vf->num_mdd_events++;
7232 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7236 reg = rd32(hw, I40E_VP_MDET_RX(i));
7237 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7238 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7239 vf->num_mdd_events++;
7240 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7244 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7245 dev_info(&pf->pdev->dev,
7246 "Too many MDD events on VF %d, disabled\n", i);
7247 dev_info(&pf->pdev->dev,
7248 "Use PF Control I/F to re-enable the VF\n");
7249 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7253 /* re-enable mdd interrupt cause */
7254 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7255 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7256 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7257 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7262 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7263 * @pf: board private structure
7265 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7267 struct i40e_hw *hw = &pf->hw;
7272 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7275 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7277 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7278 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7279 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7280 port = pf->udp_ports[i].index;
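/* A non-zero saved port means this tunnel port still needs to be added;
 * a zero entry means the tunnel at this index should be deleted.
 */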
7282 ret = i40e_aq_add_udp_tunnel(hw, port,
7283 pf->udp_ports[i].type,
7286 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7289 dev_dbg(&pf->pdev->dev,
7290 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7291 pf->udp_ports[i].type ? "vxlan" : "geneve",
7292 port ? "add" : "delete",
7294 i40e_stat_str(&pf->hw, ret),
7295 i40e_aq_str(&pf->hw,
7296 pf->hw.aq.asq_last_status));
7297 pf->udp_ports[i].index = 0;
7304 * i40e_service_task - Run the driver's async subtasks
7305 * @work: pointer to work_struct containing our data
7307 static void i40e_service_task(struct work_struct *work)
7309 struct i40e_pf *pf = container_of(work,
7312 unsigned long start_time = jiffies;
7314 /* don't bother with service tasks if a reset is in progress */
7315 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7319 if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
7322 i40e_detect_recover_hung(pf);
7323 i40e_sync_filters_subtask(pf);
7324 i40e_reset_subtask(pf);
7325 i40e_handle_mdd_event(pf);
7326 i40e_vc_process_vflr_event(pf);
7327 i40e_watchdog_subtask(pf);
7328 i40e_fdir_reinit_subtask(pf);
7329 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
7330 /* Client subtask will reopen next time through. */
7331 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7332 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7334 i40e_client_subtask(pf);
7335 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7336 i40e_notify_client_of_l2_param_changes(
7337 pf->vsi[pf->lan_vsi]);
7338 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7341 i40e_sync_filters_subtask(pf);
7342 i40e_sync_udp_filters_subtask(pf);
7343 i40e_clean_adminq_subtask(pf);
7345 /* flush memory to make sure state is correct before next watchdog */
7346 smp_mb__before_atomic();
7347 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7349 /* If the tasks have taken longer than one timer cycle or there
7350 * is more work to be done, reschedule the service task now
7351 * rather than wait for the timer to tick again.
7353 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7354 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7355 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7356 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7357 i40e_service_event_schedule(pf);
7361 * i40e_service_timer - timer callback
7362 * @data: pointer to PF struct
7364 static void i40e_service_timer(unsigned long data)
7366 struct i40e_pf *pf = (struct i40e_pf *)data;
7368 mod_timer(&pf->service_timer,
7369 round_jiffies(jiffies + pf->service_timer_period));
7370 i40e_service_event_schedule(pf);
7374 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7375 * @vsi: the VSI being configured
7377 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7379 struct i40e_pf *pf = vsi->back;
7381 switch (vsi->type) {
7383 vsi->alloc_queue_pairs = pf->num_lan_qps;
7384 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7385 I40E_REQ_DESCRIPTOR_MULTIPLE);
7386 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7387 vsi->num_q_vectors = pf->num_lan_msix;
7389 vsi->num_q_vectors = 1;
7394 vsi->alloc_queue_pairs = 1;
7395 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7396 I40E_REQ_DESCRIPTOR_MULTIPLE);
7397 vsi->num_q_vectors = pf->num_fdsb_msix;
7400 case I40E_VSI_VMDQ2:
7401 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7402 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7403 I40E_REQ_DESCRIPTOR_MULTIPLE);
7404 vsi->num_q_vectors = pf->num_vmdq_msix;
7407 case I40E_VSI_SRIOV:
7408 vsi->alloc_queue_pairs = pf->num_vf_qps;
7409 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7410 I40E_REQ_DESCRIPTOR_MULTIPLE);
7422 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7423 * @vsi: Pointer to the VSI being configured
7424 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7426 * On error: returns error code (negative)
7427 * On success: returns 0
7429 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7434 /* allocate memory for both Tx and Rx ring pointers */
7435 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7436 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
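/* rx_rings is not a separate allocation; it points at the second half
 * of the block allocated for tx_rings above.
 */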
7439 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7441 if (alloc_qvectors) {
7442 /* allocate memory for q_vector pointers */
7443 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7444 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7445 if (!vsi->q_vectors) {
7453 kfree(vsi->tx_rings);
7458 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7459 * @pf: board private structure
7460 * @type: type of VSI
7462 * On error: returns error code (negative)
7463 * On success: returns vsi index in PF (positive)
7465 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7468 struct i40e_vsi *vsi;
7472 /* Need to protect the allocation of the VSIs at the PF level */
7473 mutex_lock(&pf->switch_mutex);
7475 /* VSI list may be fragmented if VSI creation/destruction has
7476 * been happening. We can afford to do a quick scan to look
7477 * for any free VSIs in the list.
7479 * find next empty vsi slot, looping back around if necessary
7482 while (i < pf->num_alloc_vsi && pf->vsi[i])
7484 if (i >= pf->num_alloc_vsi) {
7486 while (i < pf->next_vsi && pf->vsi[i])
7490 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7491 vsi_idx = i; /* Found one! */
7494 goto unlock_pf; /* out of VSI slots! */
7498 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7505 set_bit(__I40E_DOWN, &vsi->state);
7508 vsi->int_rate_limit = 0;
7509 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7510 pf->rss_table_size : 64;
7511 vsi->netdev_registered = false;
7512 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7513 hash_init(vsi->mac_filter_hash);
7514 vsi->irqs_ready = false;
7516 ret = i40e_set_num_rings_in_vsi(vsi);
7520 ret = i40e_vsi_alloc_arrays(vsi, true);
7524 /* Setup default MSIX irq handler for VSI */
7525 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7527 /* Initialize VSI lock */
7528 spin_lock_init(&vsi->mac_filter_hash_lock);
7529 pf->vsi[vsi_idx] = vsi;
7534 pf->next_vsi = i - 1;
7537 mutex_unlock(&pf->switch_mutex);
7542 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7543 * @vsi: Pointer to the VSI being cleaned
7544 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7546 * On error: returns error code (negative)
7547 * On success: returns 0
7549 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7551 /* free the ring and vector containers */
7552 if (free_qvectors) {
7553 kfree(vsi->q_vectors);
7554 vsi->q_vectors = NULL;
7556 kfree(vsi->tx_rings);
7557 vsi->tx_rings = NULL;
7558 vsi->rx_rings = NULL;
7562 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7564 * @vsi: Pointer to VSI structure
7566 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7571 kfree(vsi->rss_hkey_user);
7572 vsi->rss_hkey_user = NULL;
7574 kfree(vsi->rss_lut_user);
7575 vsi->rss_lut_user = NULL;
7579 * i40e_vsi_clear - Deallocate the VSI provided
7580 * @vsi: the VSI being un-configured
7582 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7593 mutex_lock(&pf->switch_mutex);
7594 if (!pf->vsi[vsi->idx]) {
7595 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7596 vsi->idx, vsi->idx, vsi, vsi->type);
7600 if (pf->vsi[vsi->idx] != vsi) {
7601 dev_err(&pf->pdev->dev,
7602 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7603 pf->vsi[vsi->idx]->idx,
7605 pf->vsi[vsi->idx]->type,
7606 vsi->idx, vsi, vsi->type);
7610 /* updates the PF for this cleared vsi */
7611 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7612 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7614 i40e_vsi_free_arrays(vsi, true);
7615 i40e_clear_rss_config_user(vsi);
7617 pf->vsi[vsi->idx] = NULL;
7618 if (vsi->idx < pf->next_vsi)
7619 pf->next_vsi = vsi->idx;
7622 mutex_unlock(&pf->switch_mutex);
7630 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7631 * @vsi: the VSI being cleaned
7633 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7637 if (vsi->tx_rings && vsi->tx_rings[0]) {
7638 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7639 kfree_rcu(vsi->tx_rings[i], rcu);
7640 vsi->tx_rings[i] = NULL;
7641 vsi->rx_rings[i] = NULL;
7647 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7648 * @vsi: the VSI being configured
7650 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7652 struct i40e_ring *tx_ring, *rx_ring;
7653 struct i40e_pf *pf = vsi->back;
7656 /* Set basic values in the rings to be used later during open() */
7657 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7658 /* allocate space for both Tx and Rx in one shot */
7659 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7663 tx_ring->queue_index = i;
7664 tx_ring->reg_idx = vsi->base_queue + i;
7665 tx_ring->ring_active = false;
7667 tx_ring->netdev = vsi->netdev;
7668 tx_ring->dev = &pf->pdev->dev;
7669 tx_ring->count = vsi->num_desc;
7671 tx_ring->dcb_tc = 0;
7672 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7673 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7674 tx_ring->tx_itr_setting = pf->tx_itr_default;
7675 vsi->tx_rings[i] = tx_ring;
7677 rx_ring = &tx_ring[1];
7678 rx_ring->queue_index = i;
7679 rx_ring->reg_idx = vsi->base_queue + i;
7680 rx_ring->ring_active = false;
7682 rx_ring->netdev = vsi->netdev;
7683 rx_ring->dev = &pf->pdev->dev;
7684 rx_ring->count = vsi->num_desc;
7686 rx_ring->dcb_tc = 0;
7687 rx_ring->rx_itr_setting = pf->rx_itr_default;
7688 vsi->rx_rings[i] = rx_ring;
7694 i40e_vsi_clear_rings(vsi);
7699 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7700 * @pf: board private structure
7701 * @vectors: the number of MSI-X vectors to request
7703 * Returns the number of vectors reserved, or error
7705 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7707 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7708 I40E_MIN_MSIX, vectors);
7710 dev_info(&pf->pdev->dev,
7711 "MSI-X vector reservation failed: %d\n", vectors);
7719 * i40e_init_msix - Setup the MSIX capability
7720 * @pf: board private structure
7722 * Work with the OS to set up the MSIX vectors needed.
7724 * Returns the number of vectors reserved or negative on failure
7726 static int i40e_init_msix(struct i40e_pf *pf)
7728 struct i40e_hw *hw = &pf->hw;
7729 int cpus, extra_vectors;
7733 int iwarp_requested = 0;
7735 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7738 /* The number of vectors we'll request will be comprised of:
7739 * - Add 1 for "other" cause for Admin Queue events, etc.
7740 * - The number of LAN queue pairs
7741 * - Queues being used for RSS.
7742 * We don't need as many as max_rss_size vectors.
7743 * Use rss_size instead in the calculation since that
7744 * is governed by number of cpus in the system.
7745 * - assumes symmetric Tx/Rx pairing
7746 * - The number of VMDq pairs
7747 * - The CPU count within the NUMA node if iWARP is enabled
7748 * Once we count this up, try the request.
7750 * If we can't get what we want, we'll simplify to nearly nothing
7751 * and try again. If that still fails, we punt.
7753 vectors_left = hw->func_caps.num_msix_vectors;
7756 /* reserve one vector for miscellaneous handler */
7762 /* reserve some vectors for the main PF traffic queues. Initially we
7763 * only reserve at most 50% of the available vectors, in the case that
7764 * the number of online CPUs is large. This ensures that we can enable
7765 * extra features as well. Once we've enabled the other features, we
7766 * will use any remaining vectors to reach as close as we can to the
7767 * number of online CPUs.
7769 cpus = num_online_cpus();
7770 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
7771 vectors_left -= pf->num_lan_msix;
7773 /* reserve one vector for sideband flow director */
7774 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7776 pf->num_fdsb_msix = 1;
7780 pf->num_fdsb_msix = 0;
7784 /* can we reserve enough for iWARP? */
7785 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7786 iwarp_requested = pf->num_iwarp_msix;
7789 pf->num_iwarp_msix = 0;
7790 else if (vectors_left < pf->num_iwarp_msix)
7791 pf->num_iwarp_msix = 1;
7792 v_budget += pf->num_iwarp_msix;
7793 vectors_left -= pf->num_iwarp_msix;
7796 /* any vectors left over go for VMDq support */
7797 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7798 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7799 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7801 if (!vectors_left) {
7802 pf->num_vmdq_msix = 0;
7803 pf->num_vmdq_qps = 0;
7805 /* if we're short on vectors for what's desired, we limit
7806 * the queues per vmdq. If this is still more than are
7807 * available, the user will need to change the number of
7808 * queues/vectors used by the PF later with the ethtool
7811 if (vmdq_vecs < vmdq_vecs_wanted)
7812 pf->num_vmdq_qps = 1;
7813 pf->num_vmdq_msix = pf->num_vmdq_qps;
7815 v_budget += vmdq_vecs;
7816 vectors_left -= vmdq_vecs;
7820 /* On systems with a large number of SMP cores, we previously limited
7821 * the number of vectors for num_lan_msix to be at most 50% of the
7822 * available vectors, to allow for other features. Now, we add back
7823 * the remaining vectors. However, we ensure that the total
7824 * num_lan_msix will not exceed num_online_cpus(). To do this, we
7825 * calculate the number of vectors we can add without going over the
7826 * cap of CPUs. For systems with a small number of CPUs this will be
7829 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
7830 pf->num_lan_msix += extra_vectors;
7831 vectors_left -= extra_vectors;
7833 WARN(vectors_left < 0,
7834 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
7836 v_budget += pf->num_lan_msix;
7837 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7839 if (!pf->msix_entries)
7842 for (i = 0; i < v_budget; i++)
7843 pf->msix_entries[i].entry = i;
7844 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7846 if (v_actual < I40E_MIN_MSIX) {
7847 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7848 kfree(pf->msix_entries);
7849 pf->msix_entries = NULL;
7850 pci_disable_msix(pf->pdev);
7853 } else if (v_actual == I40E_MIN_MSIX) {
7854 /* Adjust for minimal MSIX use */
7855 pf->num_vmdq_vsis = 0;
7856 pf->num_vmdq_qps = 0;
7857 pf->num_lan_qps = 1;
7858 pf->num_lan_msix = 1;
7860 } else if (!vectors_left) {
7861 /* If we have limited resources, we will start with no vectors
7862 * for the special features and then allocate vectors to some
7863 * of these features based on the policy and at the end disable
7864 * the features that did not get any vectors.
7868 dev_info(&pf->pdev->dev,
7869 "MSI-X vector limit reached, attempting to redistribute vectors\n");
7870 /* reserve the misc vector */
7873 /* Scale vector usage down */
7874 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
7875 pf->num_vmdq_vsis = 1;
7876 pf->num_vmdq_qps = 1;
7878 /* partition out the remaining vectors */
7881 pf->num_lan_msix = 1;
7884 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7885 pf->num_lan_msix = 1;
7886 pf->num_iwarp_msix = 1;
7888 pf->num_lan_msix = 2;
7892 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7893 pf->num_iwarp_msix = min_t(int, (vec / 3),
7895 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7896 I40E_DEFAULT_NUM_VMDQ_VSI);
7898 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7899 I40E_DEFAULT_NUM_VMDQ_VSI);
7901 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7902 pf->num_fdsb_msix = 1;
7905 pf->num_lan_msix = min_t(int,
7906 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7908 pf->num_lan_qps = pf->num_lan_msix;
7913 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
7914 (pf->num_fdsb_msix == 0)) {
7915 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
7916 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7918 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7919 (pf->num_vmdq_msix == 0)) {
7920 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7921 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7924 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7925 (pf->num_iwarp_msix == 0)) {
7926 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7927 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7929 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
7930 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
7932 pf->num_vmdq_msix * pf->num_vmdq_vsis,
7934 pf->num_iwarp_msix);
7940 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7941 * @vsi: the VSI being configured
7942 * @v_idx: index of the vector in the vsi struct
7943 * @cpu: cpu to be used on affinity_mask
7945 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7947 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
7949 struct i40e_q_vector *q_vector;
7951 /* allocate q_vector */
7952 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7956 q_vector->vsi = vsi;
7957 q_vector->v_idx = v_idx;
7958 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7961 netif_napi_add(vsi->netdev, &q_vector->napi,
7962 i40e_napi_poll, NAPI_POLL_WEIGHT);
7964 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7965 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7967 /* tie q_vector and vsi together */
7968 vsi->q_vectors[v_idx] = q_vector;
7974 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7975 * @vsi: the VSI being configured
7977 * We allocate one q_vector per queue interrupt. If allocation fails we
7980 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7982 struct i40e_pf *pf = vsi->back;
7983 int err, v_idx, num_q_vectors, current_cpu;
7985 /* if not MSIX, give the one vector only to the LAN VSI */
7986 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7987 num_q_vectors = vsi->num_q_vectors;
7988 else if (vsi == pf->vsi[pf->lan_vsi])
7993 current_cpu = cpumask_first(cpu_online_mask);
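/* Spread the q_vectors' initial affinity hints across the online CPUs
 * round-robin, wrapping back to the first CPU when we run out.
 */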
7995 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7996 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
7999 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8000 if (unlikely(current_cpu >= nr_cpu_ids))
8001 current_cpu = cpumask_first(cpu_online_mask);
8008 i40e_free_q_vector(vsi, v_idx);
8014 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
8015 * @pf: board private structure to initialize
8017 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
8022 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8023 vectors = i40e_init_msix(pf);
8025 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
8026 I40E_FLAG_IWARP_ENABLED |
8027 I40E_FLAG_RSS_ENABLED |
8028 I40E_FLAG_DCB_CAPABLE |
8029 I40E_FLAG_DCB_ENABLED |
8030 I40E_FLAG_SRIOV_ENABLED |
8031 I40E_FLAG_FD_SB_ENABLED |
8032 I40E_FLAG_FD_ATR_ENABLED |
8033 I40E_FLAG_VMDQ_ENABLED);
8035 /* rework the queue expectations without MSIX */
8036 i40e_determine_queue_usage(pf);
8040 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8041 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
8042 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
8043 vectors = pci_enable_msi(pf->pdev);
8045 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
8047 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
8049 vectors = 1; /* one MSI or Legacy vector */
8052 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
8053 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
8055 /* set up vector assignment tracking */
8056 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
8057 pf->irq_pile = kzalloc(size, GFP_KERNEL);
8058 if (!pf->irq_pile) {
8059 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
8062 pf->irq_pile->num_entries = vectors;
8063 pf->irq_pile->search_hint = 0;
8065 /* track first vector for misc interrupts, ignore return */
8066 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
8072 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
8073 * @pf: board private structure
8075 * This sets up the handler for MSIX 0, which is used to manage the
8076 * non-queue interrupts, e.g. AdminQ and errors. This is not used
8077 * when in MSI or Legacy interrupt mode.
8079 static int i40e_setup_misc_vector(struct i40e_pf *pf)
8081 struct i40e_hw *hw = &pf->hw;
8084 /* Only request the irq if this is the first time through, and
8085 * not when we're rebuilding after a Reset
8087 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
8088 err = request_irq(pf->msix_entries[0].vector,
8089 i40e_intr, 0, pf->int_name, pf);
8091 dev_info(&pf->pdev->dev,
8092 "request_irq for %s failed: %d\n",
8098 i40e_enable_misc_int_causes(pf);
8100 /* associate no queues to the misc vector */
8101 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
8102 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
8106 i40e_irq_dynamic_enable_icr0(pf, true);
8112 * i40e_config_rss_aq - Prepare for RSS using AQ commands
8113 * @vsi: vsi structure
8114 * @seed: RSS hash seed
8116 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8117 u8 *lut, u16 lut_size)
8119 struct i40e_pf *pf = vsi->back;
8120 struct i40e_hw *hw = &pf->hw;
8124 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8125 (struct i40e_aqc_get_set_rss_key_data *)seed;
8126 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8128 dev_info(&pf->pdev->dev,
8129 "Cannot set RSS key, err %s aq_err %s\n",
8130 i40e_stat_str(hw, ret),
8131 i40e_aq_str(hw, hw->aq.asq_last_status));
8136 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8138 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8140 dev_info(&pf->pdev->dev,
8141 "Cannot set RSS lut, err %s aq_err %s\n",
8142 i40e_stat_str(hw, ret),
8143 i40e_aq_str(hw, hw->aq.asq_last_status));
8151 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8152 * @vsi: Pointer to vsi structure
8153 * @seed: Buffer to store the hash keys
8154 * @lut: Buffer to store the lookup table entries
8155 * @lut_size: Size of buffer to store the lookup table entries
8157 * Return 0 on success, negative on failure
8159 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8160 u8 *lut, u16 lut_size)
8162 struct i40e_pf *pf = vsi->back;
8163 struct i40e_hw *hw = &pf->hw;
8167 ret = i40e_aq_get_rss_key(hw, vsi->id,
8168 (struct i40e_aqc_get_set_rss_key_data *)seed);
8170 dev_info(&pf->pdev->dev,
8171 "Cannot get RSS key, err %s aq_err %s\n",
8172 i40e_stat_str(&pf->hw, ret),
8173 i40e_aq_str(&pf->hw,
8174 pf->hw.aq.asq_last_status));
8180 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8182 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8184 dev_info(&pf->pdev->dev,
8185 "Cannot get RSS lut, err %s aq_err %s\n",
8186 i40e_stat_str(&pf->hw, ret),
8187 i40e_aq_str(&pf->hw,
8188 pf->hw.aq.asq_last_status));
8197 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8198 * @vsi: VSI structure
8200 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8202 u8 seed[I40E_HKEY_ARRAY_SIZE];
8203 struct i40e_pf *pf = vsi->back;
8207 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8211 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8212 vsi->num_queue_pairs);
8216 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8219 /* Use the user configured hash keys and lookup table if there is one,
8220 * otherwise use default
8222 if (vsi->rss_lut_user)
8223 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8225 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8226 if (vsi->rss_hkey_user)
8227 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8229 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8230 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8237 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8238 * @vsi: Pointer to vsi structure
8239 * @seed: RSS hash seed
8240 * @lut: Lookup table
8241 * @lut_size: Lookup table size
8243 * Returns 0 on success, negative on failure
8245 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8246 const u8 *lut, u16 lut_size)
8248 struct i40e_pf *pf = vsi->back;
8249 struct i40e_hw *hw = &pf->hw;
8250 u16 vf_id = vsi->vf_id;
8253 /* Fill out hash function seed */
8255 u32 *seed_dw = (u32 *)seed;
8257 if (vsi->type == I40E_VSI_MAIN) {
8258 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8259 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
8260 } else if (vsi->type == I40E_VSI_SRIOV) {
8261 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8262 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
8264 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8269 u32 *lut_dw = (u32 *)lut;
8271 if (vsi->type == I40E_VSI_MAIN) {
8272 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8274 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8275 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8276 } else if (vsi->type == I40E_VSI_SRIOV) {
8277 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8279 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8280 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
8282 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8291 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8292 * @vsi: Pointer to VSI structure
8293 * @seed: Buffer to store the keys
8294 * @lut: Buffer to store the lookup table entries
8295 * @lut_size: Size of buffer to store the lookup table entries
8297 * Returns 0 on success, negative on failure
8299 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8300 u8 *lut, u16 lut_size)
8302 struct i40e_pf *pf = vsi->back;
8303 struct i40e_hw *hw = &pf->hw;
8307 u32 *seed_dw = (u32 *)seed;
8309 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8310 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8313 u32 *lut_dw = (u32 *)lut;
8315 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8317 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8318 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8325 * i40e_config_rss - Configure RSS keys and lut
8326 * @vsi: Pointer to VSI structure
8327 * @seed: RSS hash seed
8328 * @lut: Lookup table
8329 * @lut_size: Lookup table size
8331 * Returns 0 on success, negative on failure
8333 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8335 struct i40e_pf *pf = vsi->back;
8337 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8338 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8340 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8344 * i40e_get_rss - Get RSS keys and lut
8345 * @vsi: Pointer to VSI structure
8346 * @seed: Buffer to store the keys
8347 * @lut: Buffer to store the lookup table entries
8348 * @lut_size: Size of buffer to store the lookup table entries
8350 * Returns 0 on success, negative on failure
8352 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8354 struct i40e_pf *pf = vsi->back;
8356 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8357 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8359 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8363 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8364 * @pf: Pointer to board private structure
8365 * @lut: Lookup table
8366 * @rss_table_size: Lookup table size
8367 * @rss_size: Range of queue number for hashing
8369 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8370 u16 rss_table_size, u16 rss_size)
8374 for (i = 0; i < rss_table_size; i++)
8375 lut[i] = i % rss_size;
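/* The loop above yields e.g. 0,1,2,3,0,1,2,3,... for rss_size = 4,
 * spreading hash buckets evenly across the active queues.
 */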
8379 * i40e_pf_config_rss - Prepare for RSS if used
8380 * @pf: board private structure
8382 static int i40e_pf_config_rss(struct i40e_pf *pf)
8384 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8385 u8 seed[I40E_HKEY_ARRAY_SIZE];
8387 struct i40e_hw *hw = &pf->hw;
8392 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8393 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8394 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8395 hena |= i40e_pf_get_default_rss_hena(pf);
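/* hena is a 64-bit packet-type enable bitmap split across two 32-bit
 * registers, hence the separate low/high writes below.
 */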
8397 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8398 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8400 /* Determine the RSS table size based on the hardware capabilities */
8401 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8402 reg_val = (pf->rss_table_size == 512) ?
8403 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8404 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8405 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8407 /* Determine the RSS size of the VSI */
8408 if (!vsi->rss_size) {
8411 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8412 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8417 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8421 /* Use user configured lut if there is one, otherwise use default */
8422 if (vsi->rss_lut_user)
8423 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8425 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8427 /* Use user configured hash key if there is one, otherwise
8430 if (vsi->rss_hkey_user)
8431 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8433 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8434 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8441 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8442 * @pf: board private structure
8443 * @queue_count: the requested queue count for rss.
8445 * returns 0 if rss is not enabled, if enabled returns the final rss queue
8446 * count which may be different from the requested queue count.
8447 * Note: expects to be called while under rtnl_lock()
8449 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8451 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8454 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8457 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8459 if (queue_count != vsi->num_queue_pairs) {
8462 vsi->req_queue_pairs = queue_count;
8463 i40e_prep_for_reset(pf, true);
8465 pf->alloc_rss_size = new_rss_size;
8467 i40e_reset_and_rebuild(pf, true, true);
8469 /* Discard the user configured hash keys and lut, if less
8470 * queues are enabled.
8472 if (queue_count < vsi->rss_size) {
8473 i40e_clear_rss_config_user(vsi);
8474 dev_dbg(&pf->pdev->dev,
8475 "discard user configured hash keys and lut\n");
8478 /* Reset vsi->rss_size, as number of enabled queues changed */
8479 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8480 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8482 i40e_pf_config_rss(pf);
8484 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
8485 vsi->req_queue_pairs, pf->rss_size_max);
8486 return pf->alloc_rss_size;
8490 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8491 * @pf: board private structure
8493 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8496 bool min_valid, max_valid;
8499 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8500 &min_valid, &max_valid);
8504 pf->npar_min_bw = min_bw;
8506 pf->npar_max_bw = max_bw;
8513 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8514 * @pf: board private structure
8516 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8518 struct i40e_aqc_configure_partition_bw_data bw_data;
8521 /* Set the valid bit for this PF */
8522 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8523 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8524 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8526 /* Set the new bandwidths */
8527 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8533 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
8534 * @pf: board private structure
8536 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8538 /* Commit temporary BW setting to permanent NVM image */
8539 enum i40e_admin_queue_err last_aq_status;
8543 if (pf->hw.partition_id != 1) {
8544 dev_info(&pf->pdev->dev,
8545 "Commit BW only works on partition 1! This is partition %d",
8546 pf->hw.partition_id);
8547 ret = I40E_NOT_SUPPORTED;
8551 /* Acquire NVM for read access */
8552 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8553 last_aq_status = pf->hw.aq.asq_last_status;
8555 dev_info(&pf->pdev->dev,
8556 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8557 i40e_stat_str(&pf->hw, ret),
8558 i40e_aq_str(&pf->hw, last_aq_status));
8562 /* Read word 0x10 of NVM - SW compatibility word 1 */
8563 ret = i40e_aq_read_nvm(&pf->hw,
8564 I40E_SR_NVM_CONTROL_WORD,
8565 0x10, sizeof(nvm_word), &nvm_word,
8567 /* Save off last admin queue command status before releasing the NVM */
8570 last_aq_status = pf->hw.aq.asq_last_status;
8571 i40e_release_nvm(&pf->hw);
8573 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8574 i40e_stat_str(&pf->hw, ret),
8575 i40e_aq_str(&pf->hw, last_aq_status));
8579 /* Wait a bit for NVM release to complete */
8582 /* Acquire NVM for write access */
8583 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8584 last_aq_status = pf->hw.aq.asq_last_status;
8586 dev_info(&pf->pdev->dev,
8587 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8588 i40e_stat_str(&pf->hw, ret),
8589 i40e_aq_str(&pf->hw, last_aq_status));
8592 /* Write it back out unchanged to initiate update NVM,
8593 * which will force a write of the shadow (alt) RAM to
8594 * the NVM - thus storing the bandwidth values permanently.
8596 ret = i40e_aq_update_nvm(&pf->hw,
8597 I40E_SR_NVM_CONTROL_WORD,
8598 0x10, sizeof(nvm_word),
8599 &nvm_word, true, NULL);
8600 /* Save off last admin queue command status before releasing the NVM */
8603 last_aq_status = pf->hw.aq.asq_last_status;
8604 i40e_release_nvm(&pf->hw);
8606 dev_info(&pf->pdev->dev,
8607 "BW settings NOT SAVED, err %s aq_err %s\n",
8608 i40e_stat_str(&pf->hw, ret),
8609 i40e_aq_str(&pf->hw, last_aq_status));
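/* Summary of the commit sequence above: acquire the NVM for read, read SW
 * compatibility word 0x10, release, re-acquire for write, then write the
 * same word back via i40e_aq_update_nvm(), which flushes the shadow (alt)
 * RAM - and with it the temporary BW settings - into the NVM image.
 */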
8616 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8617 * @pf: board private structure to initialize
8619 * i40e_sw_init initializes the Adapter private data structure.
8620 * Fields are initialized based on PCI device information and
8621 * OS network device settings (MTU size).
8623 static int i40e_sw_init(struct i40e_pf *pf)
8628 /* Set default capability flags */
8629 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8630 I40E_FLAG_MSI_ENABLED |
8631 I40E_FLAG_MSIX_ENABLED;
8633 /* Set default ITR */
8634 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8635 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8637 /* Depending on PF configurations, it is possible that the RSS
8638 * maximum might end up larger than the available queues
8640 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8641 pf->alloc_rss_size = 1;
8642 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8643 pf->rss_size_max = min_t(int, pf->rss_size_max,
8644 pf->hw.func_caps.num_tx_qp);
8645 if (pf->hw.func_caps.rss) {
8646 pf->flags |= I40E_FLAG_RSS_ENABLED;
8647 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8651 /* MFP mode enabled */
8652 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8653 pf->flags |= I40E_FLAG_MFP_ENABLED;
8654 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8655 if (i40e_get_npar_bw_setting(pf))
8656 dev_warn(&pf->pdev->dev,
8657 "Could not get NPAR bw settings\n");
8659 dev_info(&pf->pdev->dev,
8660 "Min BW = %8.8x, Max BW = %8.8x\n",
8661 pf->npar_min_bw, pf->npar_max_bw);
8664 /* FW/NVM is not yet fixed in this regard */
8665 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8666 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8667 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8668 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8669 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8670 pf->hw.num_partitions > 1)
8671 dev_info(&pf->pdev->dev,
8672 "Flow Director Sideband mode Disabled in MFP mode\n");
8674 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8675 pf->fdir_pf_filter_count =
8676 pf->hw.func_caps.fd_filters_guaranteed;
8677 pf->hw.fdir_shared_filter_count =
8678 pf->hw.func_caps.fd_filters_best_effort;
8681 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
8682 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8683 (pf->hw.aq.fw_maj_ver < 4))) {
8684 pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8685 /* No DCB support for FW < v4.33 */
8686 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8689 /* Disable FW LLDP if FW < v4.3 */
8690 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
8691 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8692 (pf->hw.aq.fw_maj_ver < 4)))
8693 pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8695 /* Use the FW Set LLDP MIB API if FW > v4.40 */
8696 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
8697 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8698 (pf->hw.aq.fw_maj_ver >= 5)))
8699 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
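/* The three checks above gate features on XL710 firmware versions: FW older
 * than 4.33 needs the autoneg restart workaround and gets no DCB support,
 * FW older than 4.3 has its LLDP agent stopped, and FW 4.40 or newer (or
 * major version 5 and up) can use the Set LLDP MIB admin queue API.
 */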
8701 if (pf->hw.func_caps.vmdq) {
8702 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8703 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8704 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8707 if (pf->hw.func_caps.iwarp) {
8708 pf->flags |= I40E_FLAG_IWARP_ENABLED;
8709 /* IWARP needs one extra vector for CQP, just like MISC. */
8710 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8713 #ifdef CONFIG_PCI_IOV
8714 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8715 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8716 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8717 pf->num_req_vfs = min_t(int,
8718 pf->hw.func_caps.num_vfs,
8721 #endif /* CONFIG_PCI_IOV */
8722 if (pf->hw.mac.type == I40E_MAC_X722) {
8723 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
8724 | I40E_FLAG_128_QP_RSS_CAPABLE
8725 | I40E_FLAG_HW_ATR_EVICT_CAPABLE
8726 | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE
8727 | I40E_FLAG_WB_ON_ITR_CAPABLE
8728 | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
8729 | I40E_FLAG_NO_PCI_LINK_CHECK
8730 | I40E_FLAG_USE_SET_LLDP_MIB
8731 | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
8732 | I40E_FLAG_PTP_L4_CAPABLE
8733 | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE;
8734 } else if ((pf->hw.aq.api_maj_ver > 1) ||
8735 ((pf->hw.aq.api_maj_ver == 1) &&
8736 (pf->hw.aq.api_min_ver > 4))) {
8737 /* Supported in FW API version higher than 1.4 */
8738 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8739 pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8741 pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8744 pf->eeprom_version = 0xDEAD;
8745 pf->lan_veb = I40E_NO_VEB;
8746 pf->lan_vsi = I40E_NO_VSI;
8748 /* By default FW has this off for performance reasons */
8749 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8751 /* set up queue assignment tracking */
8752 size = sizeof(struct i40e_lump_tracking)
8753 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8754 pf->qp_pile = kzalloc(size, GFP_KERNEL);
8759 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8760 pf->qp_pile->search_hint = 0;
8762 pf->tx_timeout_recovery_level = 1;
8764 mutex_init(&pf->switch_mutex);
8766 /* If NPAR is enabled nudge the Tx scheduler */
8767 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8768 i40e_set_npar_bw_setting(pf);
8775 * i40e_set_ntuple - set the ntuple feature flag and take action
8776 * @pf: board private structure to initialize
8777 * @features: the feature set that the stack is suggesting
8779 * Returns a bool to indicate whether a reset needs to happen.
8781 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8783 bool need_reset = false;
8785 /* Check if Flow Director n-tuple support was enabled or disabled. If
8786 * the state changed, we need to reset.
8788 if (features & NETIF_F_NTUPLE) {
8789 /* Enable filters and mark for reset */
8790 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8792 /* enable FD_SB only if there is an MSI-X vector for it */
8793 if (pf->num_fdsb_msix > 0)
8794 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8796 /* turn off filters, mark for reset and clear SW filter list */
8797 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8799 i40e_fdir_filter_exit(pf);
8801 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8802 pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8803 /* reset fd counters */
8806 /* if ATR was auto disabled it can be re-enabled. */
8807 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8808 (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8809 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8810 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8811 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
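/* Net effect of the above: enabling NETIF_F_NTUPLE turns on FD sideband
 * (only when an MSI-X vector was reserved for it), while disabling it
 * flushes the SW filter list and lets ATR be re-enabled; the caller learns
 * whether a PF reset is required to apply the change.
 */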
8818 * i40e_clear_rss_lut - clear the rx hash lookup table
8819 * @vsi: the VSI being configured
8821 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8823 struct i40e_pf *pf = vsi->back;
8824 struct i40e_hw *hw = &pf->hw;
8825 u16 vf_id = vsi->vf_id;
8828 if (vsi->type == I40E_VSI_MAIN) {
8829 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8830 wr32(hw, I40E_PFQF_HLUT(i), 0);
8831 } else if (vsi->type == I40E_VSI_SRIOV) {
8832 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8833 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8835 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
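/* The main VSI clears the PF-wide HLUT registers directly, while an SR-IOV
 * VSI clears its per-VF VFQF_HLUT1 table through the rx_ctl write helper.
 */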
8840 * i40e_set_features - set the netdev feature flags
8841 * @netdev: ptr to the netdev being adjusted
8842 * @features: the feature set that the stack is suggesting
8843 * Note: expects to be called while under rtnl_lock()
8845 static int i40e_set_features(struct net_device *netdev,
8846 netdev_features_t features)
8848 struct i40e_netdev_priv *np = netdev_priv(netdev);
8849 struct i40e_vsi *vsi = np->vsi;
8850 struct i40e_pf *pf = vsi->back;
8853 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8854 i40e_pf_config_rss(pf);
8855 else if (!(features & NETIF_F_RXHASH) &&
8856 netdev->features & NETIF_F_RXHASH)
8857 i40e_clear_rss_lut(vsi);
8859 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8860 i40e_vlan_stripping_enable(vsi);
8862 i40e_vlan_stripping_disable(vsi);
8864 need_reset = i40e_set_ntuple(pf, features);
8867 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
8873 * i40e_get_udp_port_idx - Look up a possibly offloaded UDP port for Rx
8874 * @pf: board private structure
8875 * @port: The UDP port to look up
8877 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8879 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
8883 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8884 if (pf->udp_ports[i].index == port)
8892 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8893 * @netdev: This physical port's netdev
8894 * @ti: Tunnel endpoint information
8896 static void i40e_udp_tunnel_add(struct net_device *netdev,
8897 struct udp_tunnel_info *ti)
8899 struct i40e_netdev_priv *np = netdev_priv(netdev);
8900 struct i40e_vsi *vsi = np->vsi;
8901 struct i40e_pf *pf = vsi->back;
8902 u16 port = ntohs(ti->port);
8906 idx = i40e_get_udp_port_idx(pf, port);
8908 /* Check if port already exists */
8909 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8910 netdev_info(netdev, "port %d already offloaded\n", port);
8914 /* Now check if there is space to add the new port */
8915 next_idx = i40e_get_udp_port_idx(pf, 0);
8917 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8918 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8924 case UDP_TUNNEL_TYPE_VXLAN:
8925 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8927 case UDP_TUNNEL_TYPE_GENEVE:
8928 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8930 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8936 /* New port: add it and mark its index in the bitmap */
8937 pf->udp_ports[next_idx].index = port;
8938 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8939 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
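/* The new port is only recorded here; pending_udp_bitmap and
 * I40E_FLAG_UDP_FILTER_SYNC defer the actual admin queue programming to the
 * driver's UDP filter sync subtask, which runs later from the service task.
 */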
8943 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8944 * @netdev: This physical port's netdev
8945 * @ti: Tunnel endpoint information
8947 static void i40e_udp_tunnel_del(struct net_device *netdev,
8948 struct udp_tunnel_info *ti)
8950 struct i40e_netdev_priv *np = netdev_priv(netdev);
8951 struct i40e_vsi *vsi = np->vsi;
8952 struct i40e_pf *pf = vsi->back;
8953 u16 port = ntohs(ti->port);
8956 idx = i40e_get_udp_port_idx(pf, port);
8958 /* Check if the port exists before trying to delete it */
8959 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8963 case UDP_TUNNEL_TYPE_VXLAN:
8964 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8967 case UDP_TUNNEL_TYPE_GENEVE:
8968 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8975 /* if port exists, set it to 0 (mark for deletion)
8976 * and make it pending
8978 pf->udp_ports[idx].index = 0;
8979 pf->pending_udp_bitmap |= BIT_ULL(idx);
8980 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8984 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8988 static int i40e_get_phys_port_id(struct net_device *netdev,
8989 struct netdev_phys_item_id *ppid)
8991 struct i40e_netdev_priv *np = netdev_priv(netdev);
8992 struct i40e_pf *pf = np->vsi->back;
8993 struct i40e_hw *hw = &pf->hw;
8995 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8998 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8999 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9005 * i40e_ndo_fdb_add - add an entry to the hardware database
9006 * @ndm: the input from the stack
9007 * @tb: pointer to array of nladdr (unused)
9008 * @dev: the net device pointer
9009 * @addr: the MAC address entry being added
* @vid: VLAN ID of the entry being added
9010 * @flags: instructions from stack about fdb operation
9012 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9013 struct net_device *dev,
9014 const unsigned char *addr, u16 vid,
9017 struct i40e_netdev_priv *np = netdev_priv(dev);
9018 struct i40e_pf *pf = np->vsi->back;
9021 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9025 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9029 /* Hardware does not support aging addresses, so if an
9030 * ndm_state is given, only allow permanent addresses
9032 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9033 netdev_info(dev, "FDB only supports static addresses\n");
9037 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9038 err = dev_uc_add_excl(dev, addr);
9039 else if (is_multicast_ether_addr(addr))
9040 err = dev_mc_add_excl(dev, addr);
9044 /* Only return duplicate errors if NLM_F_EXCL is set */
9045 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9052 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
9053 * @dev: the netdev being configured
9054 * @nlh: RTNL message
9056 * Inserts a new hardware bridge if not already created and
9057 * enables the bridging mode requested (VEB or VEPA). If the
9058 * hardware bridge has already been inserted and the request
9059 * is to change the mode then that requires a PF reset to
9060 * allow rebuild of the components with required hardware
9061 * bridge mode enabled.
9063 * Note: expects to be called while under rtnl_lock()
9065 static int i40e_ndo_bridge_setlink(struct net_device *dev,
9066 struct nlmsghdr *nlh,
9069 struct i40e_netdev_priv *np = netdev_priv(dev);
9070 struct i40e_vsi *vsi = np->vsi;
9071 struct i40e_pf *pf = vsi->back;
9072 struct i40e_veb *veb = NULL;
9073 struct nlattr *attr, *br_spec;
9076 /* Only for PF VSI for now */
9077 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9080 /* Find the HW bridge for PF VSI */
9081 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9082 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9086 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9088 nla_for_each_nested(attr, br_spec, rem) {
9091 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9094 mode = nla_get_u16(attr);
9095 if ((mode != BRIDGE_MODE_VEPA) &&
9096 (mode != BRIDGE_MODE_VEB))
9099 /* Insert a new HW bridge */
9101 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9102 vsi->tc_config.enabled_tc);
9104 veb->bridge_mode = mode;
9105 i40e_config_bridge_mode(veb);
9107 /* No Bridge HW offload available */
9111 } else if (mode != veb->bridge_mode) {
9112 /* Existing HW bridge but different mode needs reset */
9113 veb->bridge_mode = mode;
9114 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9115 if (mode == BRIDGE_MODE_VEB)
9116 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9118 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9119 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
9129 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
9132 * @seq: RTNL message seq #
9133 * @dev: the netdev being configured
9134 * @filter_mask: unused
9135 * @nlflags: netlink flags passed in
9137 * Return the mode in which the hardware bridge is operating
9140 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9141 struct net_device *dev,
9142 u32 __always_unused filter_mask,
9145 struct i40e_netdev_priv *np = netdev_priv(dev);
9146 struct i40e_vsi *vsi = np->vsi;
9147 struct i40e_pf *pf = vsi->back;
9148 struct i40e_veb *veb = NULL;
9151 /* Only for PF VSI for now */
9152 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9155 /* Find the HW bridge for the PF VSI */
9156 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9157 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9164 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9165 0, 0, nlflags, filter_mask, NULL);
9169 * i40e_features_check - Validate encapsulated packet conforms to limits
9171 * @dev: This physical port's netdev
9172 * @features: Offload features that the stack believes apply
9174 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9175 struct net_device *dev,
9176 netdev_features_t features)
9180 /* No point in doing any of this if neither checksum nor GSO are
9181 * being requested for this frame. We can rule out both by just
9182 * checking for CHECKSUM_PARTIAL
9184 if (skb->ip_summed != CHECKSUM_PARTIAL)
9187 /* We cannot support GSO if the MSS is going to be less than
9188 * 64 bytes. If it is then we need to drop support for GSO.
9190 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
9191 features &= ~NETIF_F_GSO_MASK;
9193 /* MACLEN can support at most 63 words */
9194 len = skb_network_header(skb) - skb->data;
9195 if (len & ~(63 * 2))
9198 /* IPLEN and EIPLEN can support at most 127 dwords */
9199 len = skb_transport_header(skb) - skb_network_header(skb);
9200 if (len & ~(127 * 4))
9203 if (skb->encapsulation) {
9204 /* L4TUNLEN can support 127 words */
9205 len = skb_inner_network_header(skb) - skb_transport_header(skb);
9206 if (len & ~(127 * 2))
9209 /* IPLEN can support at most 127 dwords */
9210 len = skb_inner_transport_header(skb) -
9211 skb_inner_network_header(skb);
9212 if (len & ~(127 * 4))
9216 /* No need to validate L4LEN as TCP is the only protocol with a
9217 * flexible value, and we support all possible values supported
9218 * by TCP, which is at most 15 dwords
9223 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
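/* The length checks above enforce the hardware header-length limits: the L2
 * header may be at most 63 words (126 bytes), outer and inner IP headers at
 * most 127 dwords (508 bytes), and the tunnel header at most 127 words
 * (254 bytes); frames that exceed them have checksum and GSO offloads masked
 * off and are handled in software.
 */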
9226 static const struct net_device_ops i40e_netdev_ops = {
9227 .ndo_open = i40e_open,
9228 .ndo_stop = i40e_close,
9229 .ndo_start_xmit = i40e_lan_xmit_frame,
9230 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9231 .ndo_set_rx_mode = i40e_set_rx_mode,
9232 .ndo_validate_addr = eth_validate_addr,
9233 .ndo_set_mac_address = i40e_set_mac,
9234 .ndo_change_mtu = i40e_change_mtu,
9235 .ndo_do_ioctl = i40e_ioctl,
9236 .ndo_tx_timeout = i40e_tx_timeout,
9237 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9238 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9239 #ifdef CONFIG_NET_POLL_CONTROLLER
9240 .ndo_poll_controller = i40e_netpoll,
9242 .ndo_setup_tc = __i40e_setup_tc,
9243 .ndo_set_features = i40e_set_features,
9244 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9245 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9246 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9247 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9248 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9249 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9250 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9251 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9252 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9253 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9254 .ndo_fdb_add = i40e_ndo_fdb_add,
9255 .ndo_features_check = i40e_features_check,
9256 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9257 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9261 * i40e_config_netdev - Setup the netdev flags
9262 * @vsi: the VSI being configured
9264 * Returns 0 on success, negative value on failure
9266 static int i40e_config_netdev(struct i40e_vsi *vsi)
9268 struct i40e_pf *pf = vsi->back;
9269 struct i40e_hw *hw = &pf->hw;
9270 struct i40e_netdev_priv *np;
9271 struct net_device *netdev;
9272 u8 broadcast[ETH_ALEN];
9273 u8 mac_addr[ETH_ALEN];
9275 netdev_features_t hw_enc_features;
9276 netdev_features_t hw_features;
9278 etherdev_size = sizeof(struct i40e_netdev_priv);
9279 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9283 vsi->netdev = netdev;
9284 np = netdev_priv(netdev);
9287 hw_enc_features = NETIF_F_SG |
9291 NETIF_F_SOFT_FEATURES |
9296 NETIF_F_GSO_GRE_CSUM |
9297 NETIF_F_GSO_PARTIAL |
9298 NETIF_F_GSO_UDP_TUNNEL |
9299 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9305 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
9306 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9308 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9310 netdev->hw_enc_features |= hw_enc_features;
9312 /* record features VLANs can make use of */
9313 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
9315 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9316 netdev->hw_features |= NETIF_F_NTUPLE;
9317 hw_features = hw_enc_features |
9318 NETIF_F_HW_VLAN_CTAG_TX |
9319 NETIF_F_HW_VLAN_CTAG_RX;
9321 netdev->hw_features |= hw_features;
9323 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9324 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9326 if (vsi->type == I40E_VSI_MAIN) {
9327 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9328 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9329 /* The following steps are necessary for two reasons. First,
9330 * some older NVM configurations load a default MAC-VLAN
9331 * filter that will accept any tagged packet, and we want to
9332 * replace this with a normal filter. Additionally, it is
9333 * possible our MAC address was provided by the platform using
9334 * Open Firmware or similar.
9336 * Thus, we need to remove the default filter and install one
9337 * specific to the MAC address.
9339 i40e_rm_default_mac_filter(vsi, mac_addr);
9340 spin_lock_bh(&vsi->mac_filter_hash_lock);
9341 i40e_add_mac_filter(vsi, mac_addr);
9342 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9344 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9345 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9346 pf->vsi[pf->lan_vsi]->netdev->name);
9347 random_ether_addr(mac_addr);
9349 spin_lock_bh(&vsi->mac_filter_hash_lock);
9350 i40e_add_mac_filter(vsi, mac_addr);
9351 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9354 /* Add the broadcast filter so that we initially will receive
9355 * broadcast packets. Note that when a new VLAN is first added the
9356 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
9357 * specific filters as part of transitioning into "vlan" operation.
9358 * When more VLANs are added, the driver will copy each existing MAC
9359 * filter and add it for the new VLAN.
9361 * Broadcast filters are handled specially by
9362 * i40e_sync_filters_subtask, as the driver must set the broadcast
9363 * promiscuous bit instead of adding this directly as a MAC/VLAN
9364 * filter. The subtask will update the correct broadcast promiscuous
9365 * bits as VLANs become active or inactive.
9367 eth_broadcast_addr(broadcast);
9368 spin_lock_bh(&vsi->mac_filter_hash_lock);
9369 i40e_add_mac_filter(vsi, broadcast);
9370 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9372 ether_addr_copy(netdev->dev_addr, mac_addr);
9373 ether_addr_copy(netdev->perm_addr, mac_addr);
9375 netdev->priv_flags |= IFF_UNICAST_FLT;
9376 netdev->priv_flags |= IFF_SUPP_NOFCS;
9377 /* Setup netdev TC information */
9378 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9380 netdev->netdev_ops = &i40e_netdev_ops;
9381 netdev->watchdog_timeo = 5 * HZ;
9382 i40e_set_ethtool_ops(netdev);
9384 /* MTU range: 68 - 9706 */
9385 netdev->min_mtu = ETH_MIN_MTU;
9386 netdev->max_mtu = I40E_MAX_RXBUFFER -
9387 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
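/* max_mtu leaves room for the Ethernet header, FCS and one VLAN tag inside
 * the largest Rx buffer the hardware supports, which is where the 9706-byte
 * jumbo limit noted above comes from.
 */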
9393 * i40e_vsi_delete - Delete a VSI from the switch
9394 * @vsi: the VSI being removed
9396 * The VSI is removed via the delete element admin queue command; no status is returned.
9398 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9400 /* removing the default VSI is not allowed */
9401 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9404 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9408 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9409 * @vsi: the VSI being queried
9411 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
9413 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9415 struct i40e_veb *veb;
9416 struct i40e_pf *pf = vsi->back;
9418 /* Uplink is not a bridge so default to VEB */
9419 if (vsi->veb_idx == I40E_NO_VEB)
9422 veb = pf->veb[vsi->veb_idx];
9424 dev_info(&pf->pdev->dev,
9425 "There is no veb associated with the bridge\n");
9429 /* Uplink is a bridge in VEPA mode */
9430 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9433 /* Uplink is a bridge in VEB mode */
9437 /* VEPA is now default bridge, so return 0 */
9442 * i40e_add_vsi - Add a VSI to the switch
9443 * @vsi: the VSI being configured
9445 * This initializes a VSI context depending on the VSI type to be added and
9446 * passes it down to the add_vsi aq command.
9448 static int i40e_add_vsi(struct i40e_vsi *vsi)
9451 struct i40e_pf *pf = vsi->back;
9452 struct i40e_hw *hw = &pf->hw;
9453 struct i40e_vsi_context ctxt;
9454 struct i40e_mac_filter *f;
9455 struct hlist_node *h;
9458 u8 enabled_tc = 0x1; /* TC0 enabled */
9461 memset(&ctxt, 0, sizeof(ctxt));
9462 switch (vsi->type) {
9464 /* The PF's main VSI is already setup as part of the
9465 * device initialization, so we'll not bother with
9466 * the add_vsi call, but we will retrieve the current VSI context.
9469 ctxt.seid = pf->main_vsi_seid;
9470 ctxt.pf_num = pf->hw.pf_id;
9472 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9473 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9475 dev_info(&pf->pdev->dev,
9476 "couldn't get PF vsi config, err %s aq_err %s\n",
9477 i40e_stat_str(&pf->hw, ret),
9478 i40e_aq_str(&pf->hw,
9479 pf->hw.aq.asq_last_status));
9482 vsi->info = ctxt.info;
9483 vsi->info.valid_sections = 0;
9485 vsi->seid = ctxt.seid;
9486 vsi->id = ctxt.vsi_number;
9488 enabled_tc = i40e_pf_get_tc_map(pf);
9490 /* In MFP mode, set up the queue map and update the VSI */
9491 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9492 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9493 memset(&ctxt, 0, sizeof(ctxt));
9494 ctxt.seid = pf->main_vsi_seid;
9495 ctxt.pf_num = pf->hw.pf_id;
9497 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9498 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9500 dev_info(&pf->pdev->dev,
9501 "update vsi failed, err %s aq_err %s\n",
9502 i40e_stat_str(&pf->hw, ret),
9503 i40e_aq_str(&pf->hw,
9504 pf->hw.aq.asq_last_status));
9508 /* update the local VSI info queue map */
9509 i40e_vsi_update_queue_map(vsi, &ctxt);
9510 vsi->info.valid_sections = 0;
9512 /* Default/Main VSI is only enabled for TC0;
9513 * reconfigure it to enable all TCs that are
9514 * available on the port in SFP mode.
9515 * For MFP case the iSCSI PF would use this
9516 * flow to enable LAN+iSCSI TC.
9518 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9520 dev_info(&pf->pdev->dev,
9521 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9523 i40e_stat_str(&pf->hw, ret),
9524 i40e_aq_str(&pf->hw,
9525 pf->hw.aq.asq_last_status));
9532 ctxt.pf_num = hw->pf_id;
9534 ctxt.uplink_seid = vsi->uplink_seid;
9535 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9536 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9537 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9538 (i40e_is_vsi_uplink_mode_veb(vsi))) {
9539 ctxt.info.valid_sections |=
9540 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9541 ctxt.info.switch_id =
9542 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9544 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9547 case I40E_VSI_VMDQ2:
9548 ctxt.pf_num = hw->pf_id;
9550 ctxt.uplink_seid = vsi->uplink_seid;
9551 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9552 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9554 /* This VSI is connected to VEB so the switch_id
9555 * should be set to zero by default.
9557 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9558 ctxt.info.valid_sections |=
9559 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9560 ctxt.info.switch_id =
9561 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9564 /* Setup the VSI tx/rx queue map for TC0 only for now */
9565 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9568 case I40E_VSI_SRIOV:
9569 ctxt.pf_num = hw->pf_id;
9570 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9571 ctxt.uplink_seid = vsi->uplink_seid;
9572 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9573 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9575 /* This VSI is connected to VEB so the switch_id
9576 * should be set to zero by default.
9578 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9579 ctxt.info.valid_sections |=
9580 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9581 ctxt.info.switch_id =
9582 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9585 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9586 ctxt.info.valid_sections |=
9587 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9588 ctxt.info.queueing_opt_flags |=
9589 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9590 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9593 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9594 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9595 if (pf->vf[vsi->vf_id].spoofchk) {
9596 ctxt.info.valid_sections |=
9597 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9598 ctxt.info.sec_flags |=
9599 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9600 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9602 /* Setup the VSI tx/rx queue map for TC0 only for now */
9603 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9606 case I40E_VSI_IWARP:
9607 /* send down message to iWARP */
9614 if (vsi->type != I40E_VSI_MAIN) {
9615 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9617 dev_info(&vsi->back->pdev->dev,
9618 "add vsi failed, err %s aq_err %s\n",
9619 i40e_stat_str(&pf->hw, ret),
9620 i40e_aq_str(&pf->hw,
9621 pf->hw.aq.asq_last_status));
9625 vsi->info = ctxt.info;
9626 vsi->info.valid_sections = 0;
9627 vsi->seid = ctxt.seid;
9628 vsi->id = ctxt.vsi_number;
9631 vsi->active_filters = 0;
9632 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
9633 spin_lock_bh(&vsi->mac_filter_hash_lock);
9634 /* If macvlan filters already exist, force them to get loaded */
9635 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
9636 f->state = I40E_FILTER_NEW;
9639 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9642 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9643 pf->flags |= I40E_FLAG_FILTER_SYNC;
9646 /* Update VSI BW information */
9647 ret = i40e_vsi_get_bw_info(vsi);
9649 dev_info(&pf->pdev->dev,
9650 "couldn't get vsi bw info, err %s aq_err %s\n",
9651 i40e_stat_str(&pf->hw, ret),
9652 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9653 /* VSI is already added so not tearing that up */
9662 * i40e_vsi_release - Delete a VSI and free its resources
9663 * @vsi: the VSI being removed
9665 * Returns 0 on success or < 0 on error
9667 int i40e_vsi_release(struct i40e_vsi *vsi)
9669 struct i40e_mac_filter *f;
9670 struct hlist_node *h;
9671 struct i40e_veb *veb = NULL;
9678 /* release of a VEB-owner or last VSI is not allowed */
9679 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9680 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9681 vsi->seid, vsi->uplink_seid);
9684 if (vsi == pf->vsi[pf->lan_vsi] &&
9685 !test_bit(__I40E_DOWN, &pf->state)) {
9686 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9690 uplink_seid = vsi->uplink_seid;
9691 if (vsi->type != I40E_VSI_SRIOV) {
9692 if (vsi->netdev_registered) {
9693 vsi->netdev_registered = false;
9695 /* results in a call to i40e_close() */
9696 unregister_netdev(vsi->netdev);
9699 i40e_vsi_close(vsi);
9701 i40e_vsi_disable_irq(vsi);
9704 spin_lock_bh(&vsi->mac_filter_hash_lock);
9706 /* clear the sync flag on all filters */
9708 __dev_uc_unsync(vsi->netdev, NULL);
9709 __dev_mc_unsync(vsi->netdev, NULL);
9712 /* make sure any remaining filters are marked for deletion */
9713 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
9714 __i40e_del_filter(vsi, f);
9716 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9718 i40e_sync_vsi_filters(vsi);
9720 i40e_vsi_delete(vsi);
9721 i40e_vsi_free_q_vectors(vsi);
9723 free_netdev(vsi->netdev);
9726 i40e_vsi_clear_rings(vsi);
9727 i40e_vsi_clear(vsi);
9729 /* If this was the last thing on the VEB, except for the
9730 * controlling VSI, remove the VEB, which puts the controlling
9731 * VSI onto the next level down in the switch.
9733 * Well, okay, there's one more exception here: don't remove
9734 * the orphan VEBs yet. We'll wait for an explicit remove request
9735 * from up the network stack.
9737 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9739 pf->vsi[i]->uplink_seid == uplink_seid &&
9740 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9741 n++; /* count the VSIs */
9744 for (i = 0; i < I40E_MAX_VEB; i++) {
9747 if (pf->veb[i]->uplink_seid == uplink_seid)
9748 n++; /* count the VEBs */
9749 if (pf->veb[i]->seid == uplink_seid)
9752 if (n == 0 && veb && veb->uplink_seid != 0)
9753 i40e_veb_release(veb);
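/* The loops above count how many VSIs and VEBs still hang off this VSI's
 * old uplink; the uplink VEB is released only when nothing but its owner
 * remains and it is not a floating (orphan) VEB, matching the exception
 * noted in the comment above.
 */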
9759 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9760 * @vsi: ptr to the VSI
9762 * This should only be called after i40e_vsi_mem_alloc() which allocates the
9763 * corresponding SW VSI structure and initializes num_queue_pairs for the
9764 * newly allocated VSI.
9766 * Returns 0 on success or negative on failure
9768 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9771 struct i40e_pf *pf = vsi->back;
9773 if (vsi->q_vectors[0]) {
9774 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9779 if (vsi->base_vector) {
9780 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9781 vsi->seid, vsi->base_vector);
9785 ret = i40e_vsi_alloc_q_vectors(vsi);
9787 dev_info(&pf->pdev->dev,
9788 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9789 vsi->num_q_vectors, vsi->seid, ret);
9790 vsi->num_q_vectors = 0;
9791 goto vector_setup_out;
9794 /* In Legacy mode, we do not have to get any other vector since we
9795 * piggyback on the misc/ICR0 for queue interrupts.
9797 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9799 if (vsi->num_q_vectors)
9800 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9801 vsi->num_q_vectors, vsi->idx);
9802 if (vsi->base_vector < 0) {
9803 dev_info(&pf->pdev->dev,
9804 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9805 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9806 i40e_vsi_free_q_vectors(vsi);
9808 goto vector_setup_out;
9816 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
9817 * @vsi: pointer to the vsi.
9819 * This re-allocates a vsi's queue resources.
9821 * Returns pointer to the successfully allocated and configured VSI sw struct
9822 * on success, otherwise returns NULL on failure.
9824 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9835 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9836 i40e_vsi_clear_rings(vsi);
9838 i40e_vsi_free_arrays(vsi, false);
9839 i40e_set_num_rings_in_vsi(vsi);
9840 ret = i40e_vsi_alloc_arrays(vsi, false);
9844 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9846 dev_info(&pf->pdev->dev,
9847 "failed to get tracking for %d queues for VSI %d err %d\n",
9848 vsi->alloc_queue_pairs, vsi->seid, ret);
9851 vsi->base_queue = ret;
9853 /* Update the FW view of the VSI. Force a reset of TC and queue
9854 * layout configurations.
9856 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9857 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9858 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9859 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9860 if (vsi->type == I40E_VSI_MAIN)
9861 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
9863 /* assign it some queues */
9864 ret = i40e_alloc_rings(vsi);
9868 /* map all of the rings to the q_vectors */
9869 i40e_vsi_map_rings_to_vectors(vsi);
9873 i40e_vsi_free_q_vectors(vsi);
9874 if (vsi->netdev_registered) {
9875 vsi->netdev_registered = false;
9876 unregister_netdev(vsi->netdev);
9877 free_netdev(vsi->netdev);
9880 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9882 i40e_vsi_clear(vsi);
9887 * i40e_vsi_setup - Set up a VSI by a given type
9888 * @pf: board private structure
9890 * @uplink_seid: the switch element to link to
9891 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9893 * This allocates the sw VSI structure and its queue resources, then adds a VSI
9894 * to the identified VEB.
9896 * Returns pointer to the successfully allocated and configured VSI sw struct on
9897 * success, otherwise returns NULL on failure.
9899 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9900 u16 uplink_seid, u32 param1)
9902 struct i40e_vsi *vsi = NULL;
9903 struct i40e_veb *veb = NULL;
9907 /* The requested uplink_seid must be either
9908 * - the PF's port seid
9909 * no VEB is needed because this is the PF
9910 * or this is a Flow Director special case VSI
9911 * - seid of an existing VEB
9912 * - seid of a VSI that owns an existing VEB
9913 * - seid of a VSI that doesn't own a VEB
9914 * a new VEB is created and the VSI becomes the owner
9915 * - seid of the PF VSI, which is what creates the first VEB
9916 * this is a special case of the previous
9918 * Find which uplink_seid we were given and create a new VEB if needed
9920 for (i = 0; i < I40E_MAX_VEB; i++) {
9921 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9927 if (!veb && uplink_seid != pf->mac_seid) {
9929 for (i = 0; i < pf->num_alloc_vsi; i++) {
9930 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9936 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9941 if (vsi->uplink_seid == pf->mac_seid)
9942 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9943 vsi->tc_config.enabled_tc);
9944 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9945 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9946 vsi->tc_config.enabled_tc);
9948 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9949 dev_info(&vsi->back->pdev->dev,
9950 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9953 /* We come up by default in VEPA mode if SRIOV is not
9954 * already enabled, in which case we can't force VEPA mode.
9957 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9958 veb->bridge_mode = BRIDGE_MODE_VEPA;
9959 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9961 i40e_config_bridge_mode(veb);
9963 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9964 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9968 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9972 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9973 uplink_seid = veb->seid;
9976 /* get vsi sw struct */
9977 v_idx = i40e_vsi_mem_alloc(pf, type);
9980 vsi = pf->vsi[v_idx];
9984 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9986 if (type == I40E_VSI_MAIN)
9987 pf->lan_vsi = v_idx;
9988 else if (type == I40E_VSI_SRIOV)
9989 vsi->vf_id = param1;
9990 /* assign it some queues */
9991 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9994 dev_info(&pf->pdev->dev,
9995 "failed to get tracking for %d queues for VSI %d err=%d\n",
9996 vsi->alloc_queue_pairs, vsi->seid, ret);
9999 vsi->base_queue = ret;
10001 /* get a VSI from the hardware */
10002 vsi->uplink_seid = uplink_seid;
10003 ret = i40e_add_vsi(vsi);
10007 switch (vsi->type) {
10008 /* setup the netdev if needed */
10009 case I40E_VSI_MAIN:
10010 /* Apply relevant filters if a platform-specific mac
10011 * address was selected.
10013 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
10014 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
10016 dev_warn(&pf->pdev->dev,
10017 "could not set up macaddr; err %d\n",
10021 case I40E_VSI_VMDQ2:
10022 ret = i40e_config_netdev(vsi);
10025 ret = register_netdev(vsi->netdev);
10028 vsi->netdev_registered = true;
10029 netif_carrier_off(vsi->netdev);
10030 #ifdef CONFIG_I40E_DCB
10031 /* Setup DCB netlink interface */
10032 i40e_dcbnl_setup(vsi);
10033 #endif /* CONFIG_I40E_DCB */
10036 case I40E_VSI_FDIR:
10037 /* set up vectors and rings if needed */
10038 ret = i40e_vsi_setup_vectors(vsi);
10042 ret = i40e_alloc_rings(vsi);
10046 /* map all of the rings to the q_vectors */
10047 i40e_vsi_map_rings_to_vectors(vsi);
10049 i40e_vsi_reset_stats(vsi);
10053 /* no netdev or rings for the other VSI types */
10057 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
10058 (vsi->type == I40E_VSI_VMDQ2)) {
10059 ret = i40e_vsi_config_rss(vsi);
10064 i40e_vsi_free_q_vectors(vsi);
10066 if (vsi->netdev_registered) {
10067 vsi->netdev_registered = false;
10068 unregister_netdev(vsi->netdev);
10069 free_netdev(vsi->netdev);
10070 vsi->netdev = NULL;
10073 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10075 i40e_vsi_clear(vsi);
10081 * i40e_veb_get_bw_info - Query VEB BW information
10082 * @veb: the veb to query
10084 * Query the Tx scheduler BW configuration data for given VEB
10086 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
10088 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
10089 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
10090 struct i40e_pf *pf = veb->pf;
10091 struct i40e_hw *hw = &pf->hw;
10096 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10099 dev_info(&pf->pdev->dev,
10100 "query veb bw config failed, err %s aq_err %s\n",
10101 i40e_stat_str(&pf->hw, ret),
10102 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10106 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10109 dev_info(&pf->pdev->dev,
10110 "query veb bw ets config failed, err %s aq_err %s\n",
10111 i40e_stat_str(&pf->hw, ret),
10112 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10116 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
10117 veb->bw_max_quanta = ets_data.tc_bw_max;
10118 veb->is_abs_credits = bw_data.absolute_credits_enable;
10119 veb->enabled_tc = ets_data.tc_valid_bits;
10120 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
10121 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
10122 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10123 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
10124 veb->bw_tc_limit_credits[i] =
10125 le16_to_cpu(bw_data.tc_bw_limits[i]);
10126 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
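/* tc_bw_max arrives as two little-endian 16-bit words stitched into one
 * 32-bit value; each traffic class then gets a 3-bit max-quanta field pulled
 * out of its own nibble with the (i * 4) shift and the 0x7 mask.
 */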
10134 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
10135 * @pf: board private structure
10137 * On error: returns error code (negative)
10138 * On success: returns veb index in PF (positive)
10140 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10143 struct i40e_veb *veb;
10146 /* Need to protect the allocation of switch elements at the PF level */
10147 mutex_lock(&pf->switch_mutex);
10149 /* VEB list may be fragmented if VEB creation/destruction has
10150 * been happening. We can afford to do a quick scan to look
10151 * for any free slots in the list.
10153 * find next empty veb slot, looping back around if necessary
10156 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10158 if (i >= I40E_MAX_VEB) {
10160 goto err_alloc_veb; /* out of VEB slots! */
10163 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10166 goto err_alloc_veb;
10170 veb->enabled_tc = 1;
10175 mutex_unlock(&pf->switch_mutex);
10180 * i40e_switch_branch_release - Delete a branch of the switch tree
10181 * @branch: where to start deleting
10183 * This uses recursion to find the tips of the branch to be
10184 * removed, deleting until we get back to and can delete this VEB.
10186 static void i40e_switch_branch_release(struct i40e_veb *branch)
10188 struct i40e_pf *pf = branch->pf;
10189 u16 branch_seid = branch->seid;
10190 u16 veb_idx = branch->idx;
10193 /* release any VEBs on this VEB - RECURSION */
10194 for (i = 0; i < I40E_MAX_VEB; i++) {
10197 if (pf->veb[i]->uplink_seid == branch->seid)
10198 i40e_switch_branch_release(pf->veb[i]);
10201 /* Release the VSIs on this VEB, but not the owner VSI.
10203 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10204 * the VEB itself, so don't use (*branch) after this loop.
10206 for (i = 0; i < pf->num_alloc_vsi; i++) {
10209 if (pf->vsi[i]->uplink_seid == branch_seid &&
10210 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10211 i40e_vsi_release(pf->vsi[i]);
10215 /* There's one corner case where the VEB might not have been
10216 * removed, so double check it here and remove it if needed.
10217 * This case happens if the veb was created from the debugfs
10218 * commands and no VSIs were added to it.
10220 if (pf->veb[veb_idx])
10221 i40e_veb_release(pf->veb[veb_idx]);
10225 * i40e_veb_clear - remove veb struct
10226 * @veb: the veb to remove
10228 static void i40e_veb_clear(struct i40e_veb *veb)
10234 struct i40e_pf *pf = veb->pf;
10236 mutex_lock(&pf->switch_mutex);
10237 if (pf->veb[veb->idx] == veb)
10238 pf->veb[veb->idx] = NULL;
10239 mutex_unlock(&pf->switch_mutex);
10246 * i40e_veb_release - Delete a VEB and free its resources
10247 * @veb: the VEB being removed
10249 void i40e_veb_release(struct i40e_veb *veb)
10251 struct i40e_vsi *vsi = NULL;
10252 struct i40e_pf *pf;
10257 /* find the remaining VSI and check for extras */
10258 for (i = 0; i < pf->num_alloc_vsi; i++) {
10259 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10265 dev_info(&pf->pdev->dev,
10266 "can't remove VEB %d with %d VSIs left\n",
10271 /* move the remaining VSI to uplink veb */
10272 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10273 if (veb->uplink_seid) {
10274 vsi->uplink_seid = veb->uplink_seid;
10275 if (veb->uplink_seid == pf->mac_seid)
10276 vsi->veb_idx = I40E_NO_VEB;
10278 vsi->veb_idx = veb->veb_idx;
10281 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10282 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10285 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10286 i40e_veb_clear(veb);
10290 * i40e_add_veb - create the VEB in the switch
10291 * @veb: the VEB to be instantiated
10292 * @vsi: the controlling VSI
10294 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10296 struct i40e_pf *pf = veb->pf;
10297 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10300 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10301 veb->enabled_tc, false,
10302 &veb->seid, enable_stats, NULL);
10304 /* get a VEB from the hardware */
10306 dev_info(&pf->pdev->dev,
10307 "couldn't add VEB, err %s aq_err %s\n",
10308 i40e_stat_str(&pf->hw, ret),
10309 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10313 /* get statistics counter */
10314 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10315 &veb->stats_idx, NULL, NULL, NULL);
10317 dev_info(&pf->pdev->dev,
10318 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10319 i40e_stat_str(&pf->hw, ret),
10320 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10323 ret = i40e_veb_get_bw_info(veb);
10325 dev_info(&pf->pdev->dev,
10326 "couldn't get VEB bw info, err %s aq_err %s\n",
10327 i40e_stat_str(&pf->hw, ret),
10328 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10329 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10333 vsi->uplink_seid = veb->seid;
10334 vsi->veb_idx = veb->idx;
10335 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10341 * i40e_veb_setup - Set up a VEB
10342 * @pf: board private structure
10343 * @flags: VEB setup flags
10344 * @uplink_seid: the switch element to link to
10345 * @vsi_seid: the initial VSI seid
10346 * @enabled_tc: Enabled TC bit-map
10348 * This allocates the sw VEB structure and links it into the switch.
10349 * It is possible and legal for this to be a duplicate of an already
10350 * existing VEB. It is also possible for both uplink and vsi seids
10351 * to be zero, in order to create a floating VEB.
10353 * Returns pointer to the successfully allocated VEB sw struct on
10354 * success, otherwise returns NULL on failure.
10356 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10357 u16 uplink_seid, u16 vsi_seid,
10360 struct i40e_veb *veb, *uplink_veb = NULL;
10361 int vsi_idx, veb_idx;
10364 /* if one seid is 0, the other must be 0 to create a floating relay */
10365 if ((uplink_seid == 0 || vsi_seid == 0) &&
10366 (uplink_seid + vsi_seid != 0)) {
10367 dev_info(&pf->pdev->dev,
10368 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10369 uplink_seid, vsi_seid);
10373 /* make sure there is such a vsi and uplink */
10374 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10375 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10377 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10378 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10383 if (uplink_seid && uplink_seid != pf->mac_seid) {
10384 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10385 if (pf->veb[veb_idx] &&
10386 pf->veb[veb_idx]->seid == uplink_seid) {
10387 uplink_veb = pf->veb[veb_idx];
10392 dev_info(&pf->pdev->dev,
10393 "uplink seid %d not found\n", uplink_seid);
10398 /* get veb sw struct */
10399 veb_idx = i40e_veb_mem_alloc(pf);
10402 veb = pf->veb[veb_idx];
10403 veb->flags = flags;
10404 veb->uplink_seid = uplink_seid;
10405 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10406 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10408 /* create the VEB in the switch */
10409 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10412 if (vsi_idx == pf->lan_vsi)
10413 pf->lan_veb = veb->idx;
10418 i40e_veb_clear(veb);
10424 * i40e_setup_pf_switch_element - set PF vars based on switch type
10425 * @pf: board private structure
10426 * @ele: element we are building info from
10427 * @num_reported: total number of elements
10428 * @printconfig: should we print the contents
10430 * helper function to assist in extracting a few useful SEID values.
10432 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10433 struct i40e_aqc_switch_config_element_resp *ele,
10434 u16 num_reported, bool printconfig)
10436 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10437 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10438 u8 element_type = ele->element_type;
10439 u16 seid = le16_to_cpu(ele->seid);
10442 dev_info(&pf->pdev->dev,
10443 "type=%d seid=%d uplink=%d downlink=%d\n",
10444 element_type, seid, uplink_seid, downlink_seid);
10446 switch (element_type) {
10447 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10448 pf->mac_seid = seid;
10450 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10452 if (uplink_seid != pf->mac_seid)
10454 if (pf->lan_veb == I40E_NO_VEB) {
10457 /* find existing or else empty VEB */
10458 for (v = 0; v < I40E_MAX_VEB; v++) {
10459 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10464 if (pf->lan_veb == I40E_NO_VEB) {
10465 v = i40e_veb_mem_alloc(pf);
10472 pf->veb[pf->lan_veb]->seid = seid;
10473 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10474 pf->veb[pf->lan_veb]->pf = pf;
10475 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10477 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10478 if (num_reported != 1)
10480 /* This is immediately after a reset so we can assume this is the PF's main VSI */
10483 pf->mac_seid = uplink_seid;
10484 pf->pf_seid = downlink_seid;
10485 pf->main_vsi_seid = seid;
10487 dev_info(&pf->pdev->dev,
10488 "pf_seid=%d main_vsi_seid=%d\n",
10489 pf->pf_seid, pf->main_vsi_seid);
10491 case I40E_SWITCH_ELEMENT_TYPE_PF:
10492 case I40E_SWITCH_ELEMENT_TYPE_VF:
10493 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10494 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10495 case I40E_SWITCH_ELEMENT_TYPE_PE:
10496 case I40E_SWITCH_ELEMENT_TYPE_PA:
10497 /* ignore these for now */
10500 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10501 element_type, seid);
10507 * i40e_fetch_switch_configuration - Get switch config from firmware
10508 * @pf: board private structure
10509 * @printconfig: should we print the contents
10511 * Get the current switch configuration from the device and
10512 * extract a few useful SEID values.
10514 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10516 struct i40e_aqc_get_switch_config_resp *sw_config;
10522 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10526 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10528 u16 num_reported, num_total;
10530 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10534 dev_info(&pf->pdev->dev,
10535 "get switch config failed err %s aq_err %s\n",
10536 i40e_stat_str(&pf->hw, ret),
10537 i40e_aq_str(&pf->hw,
10538 pf->hw.aq.asq_last_status));
10543 num_reported = le16_to_cpu(sw_config->header.num_reported);
10544 num_total = le16_to_cpu(sw_config->header.num_total);
10547 dev_info(&pf->pdev->dev,
10548 "header: %d reported %d total\n",
10549 num_reported, num_total);
10551 for (i = 0; i < num_reported; i++) {
10552 struct i40e_aqc_switch_config_element_resp *ele =
10553 &sw_config->element[i];
10555 i40e_setup_pf_switch_element(pf, ele, num_reported,
10558 } while (next_seid != 0);
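/* i40e_aq_get_switch_config() returns the switch elements a chunk at a time;
 * the loop keeps re-issuing the command with the next_seid cookie handed back
 * by firmware until it reports zero, meaning the whole switch tree was walked.
 */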
10565 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10566 * @pf: board private structure
10567 * @reinit: if the Main VSI needs to be re-initialized.
10569 * Returns 0 on success, negative value on failure
10571 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10576 /* find out what's out there already */
10577 ret = i40e_fetch_switch_configuration(pf, false);
10579 dev_info(&pf->pdev->dev,
10580 "couldn't fetch switch config, err %s aq_err %s\n",
10581 i40e_stat_str(&pf->hw, ret),
10582 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10585 i40e_pf_reset_stats(pf);
10587 /* set the switch config bit for the whole device to
10588 * support limited promisc or true promisc
10589 * when user requests promisc. The default is limited promisc.
10593 if ((pf->hw.pf_id == 0) &&
10594 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10595 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10597 if (pf->hw.pf_id == 0) {
10600 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10601 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10603 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10604 dev_info(&pf->pdev->dev,
10605 "couldn't set switch config bits, err %s aq_err %s\n",
10606 i40e_stat_str(&pf->hw, ret),
10607 i40e_aq_str(&pf->hw,
10608 pf->hw.aq.asq_last_status));
10609 /* not a fatal problem, just keep going */
10613 /* first time setup */
10614 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10615 struct i40e_vsi *vsi = NULL;
10618 /* Set up the PF VSI associated with the PF's main VSI
10619 * that is already in the HW switch
10621 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10622 uplink_seid = pf->veb[pf->lan_veb]->seid;
10624 uplink_seid = pf->mac_seid;
10625 if (pf->lan_vsi == I40E_NO_VSI)
10626 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10628 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10630 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10631 i40e_fdir_teardown(pf);
10635 /* force a reset of TC and queue layout configurations */
10636 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10638 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10639 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10640 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10642 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10644 i40e_fdir_sb_setup(pf);
10646 /* Setup static PF queue filter control settings */
10647 ret = i40e_setup_pf_filter_control(pf);
10649 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10651 /* Failure here should not stop the remaining steps from continuing */
10654 /* enable RSS in the HW, even for only one queue, as the stack can use the hash.
10657 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
10658 i40e_pf_config_rss(pf);
10660 /* fill in link information and enable LSE reporting */
10661 i40e_link_event(pf);
10663 /* Initialize user-specific link properties */
10664 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10665 I40E_AQ_AN_COMPLETED) ? true : false);
10673 * i40e_determine_queue_usage - Work out queue distribution
10674 * @pf: board private structure
10676 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10680 pf->num_lan_qps = 0;
10682 /* Find the max queues to be put into basic use. We'll always be
10683 * using TC0, whether or not DCB is running, and TC0 will get the big share of the queue pairs.
10686 queues_left = pf->hw.func_caps.num_tx_qp;
10688 if ((queues_left == 1) ||
10689 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10690 /* one qp for PF, no queues for anything else */
10692 pf->alloc_rss_size = pf->num_lan_qps = 1;
10694 /* make sure all the fancies are disabled */
10695 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10696 I40E_FLAG_IWARP_ENABLED |
10697 I40E_FLAG_FD_SB_ENABLED |
10698 I40E_FLAG_FD_ATR_ENABLED |
10699 I40E_FLAG_DCB_CAPABLE |
10700 I40E_FLAG_DCB_ENABLED |
10701 I40E_FLAG_SRIOV_ENABLED |
10702 I40E_FLAG_VMDQ_ENABLED);
10703 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10704 I40E_FLAG_FD_SB_ENABLED |
10705 I40E_FLAG_FD_ATR_ENABLED |
10706 I40E_FLAG_DCB_CAPABLE))) {
10707 /* one qp for PF */
10708 pf->alloc_rss_size = pf->num_lan_qps = 1;
10709 queues_left -= pf->num_lan_qps;
10711 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10712 I40E_FLAG_IWARP_ENABLED |
10713 I40E_FLAG_FD_SB_ENABLED |
10714 I40E_FLAG_FD_ATR_ENABLED |
10715 I40E_FLAG_DCB_ENABLED |
10716 I40E_FLAG_VMDQ_ENABLED);
10718 /* Not enough queues for all TCs */
10719 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10720 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10721 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10722 I40E_FLAG_DCB_ENABLED);
10723 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10725 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10726 num_online_cpus());
10727 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10728 pf->hw.func_caps.num_tx_qp);
10730 queues_left -= pf->num_lan_qps;
10733 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10734 if (queues_left > 1) {
10735 queues_left -= 1; /* save 1 queue for FD */
10737 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10738 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10742 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10743 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10744 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10745 (queues_left / pf->num_vf_qps));
10746 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10749 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10750 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10751 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10752 (queues_left / pf->num_vmdq_qps));
10753 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10756 pf->queues_left = queues_left;
10757 dev_dbg(&pf->pdev->dev,
10758 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10759 pf->hw.func_caps.num_tx_qp,
10760 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10761 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10762 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
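/* Illustrative walk-through of the distribution above; the numbers are
 * assumptions, not taken from the original source.  With num_tx_qp = 128,
 * rss_size_max = 64, 16 online CPUs, FD_SB enabled, 4 requested VFs with
 * 4 qps each and 8 VMDq VSIs with 2 qps each:
 *
 *	num_lan_qps = min(max(64, 16), 128) = 64;  queues_left = 128 - 64 = 64
 *	FD sideband reserves one queue:            queues_left = 63
 *	VFs:  min(4, 63 / 4) = 4   ->  4 * 4 = 16;  queues_left = 47
 *	VMDq: min(8, 47 / 2) = 8   ->  8 * 2 = 16;  queues_left = 31
 *
 * pf->queues_left then records the 31 unassigned queue pairs.
 */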
10767 * i40e_setup_pf_filter_control - Setup PF static filter control
10768 * @pf: PF to be setup
10770 * i40e_setup_pf_filter_control sets up a PF's initial filter control
10771 * settings. If PE/FCoE are enabled then it will also set the per-PF
10772 * filter sizes required for them. It also enables Flow Director,
10773 * ethertype and macvlan type filter settings for the PF.
10775 * Returns 0 on success, negative on failure
10777 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10779 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10781 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10783 /* Flow Director is enabled */
10784 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10785 settings->enable_fdir = true;
10787 /* Ethtype and MACVLAN filters enabled for PF */
10788 settings->enable_ethtype = true;
10789 settings->enable_macvlan = true;
10791 if (i40e_set_filter_control(&pf->hw, settings))
10797 #define INFO_STRING_LEN 255
10798 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
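/* Note added for clarity (not in the original source): snprintf() returns
 * the number of characters that would have been written, not the number
 * actually stored, so 'i' below accumulates the intended length of the
 * feature string.  REMAIN(i) passes the space still available in buf to
 * each call, and the closing WARN_ON(i > INFO_STRING_LEN) reports when the
 * summary would have been truncated.
 */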
10799 static void i40e_print_features(struct i40e_pf *pf)
10801 struct i40e_hw *hw = &pf->hw;
10805 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10809 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10810 #ifdef CONFIG_PCI_IOV
10811 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10813 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
10814 pf->hw.func_caps.num_vsis,
10815 pf->vsi[pf->lan_vsi]->num_queue_pairs);
10816 if (pf->flags & I40E_FLAG_RSS_ENABLED)
10817 i += snprintf(&buf[i], REMAIN(i), " RSS");
10818 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10819 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10820 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10821 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10822 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10824 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10825 i += snprintf(&buf[i], REMAIN(i), " DCB");
10826 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10827 i += snprintf(&buf[i], REMAIN(i), " Geneve");
10828 if (pf->flags & I40E_FLAG_PTP)
10829 i += snprintf(&buf[i], REMAIN(i), " PTP");
10830 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10831 i += snprintf(&buf[i], REMAIN(i), " VEB");
10833 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10835 dev_info(&pf->pdev->dev, "%s\n", buf);
10837 WARN_ON(i > INFO_STRING_LEN);
10841 * i40e_get_platform_mac_addr - get platform-specific MAC address
10842 * @pdev: PCI device information struct
10843 * @pf: board private structure
10845 * Look up the MAC address for the device. First we'll try
10846 * eth_platform_get_mac_address, which will check Open Firmware, or arch
10847 * specific fallback. Otherwise, we'll default to the stored value in firmware.
10850 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10852 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10853 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
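/* Example added for illustration (an assumption, not from the original
 * source): on a device-tree platform eth_platform_get_mac_address() can
 * pick the address up from a node property such as
 *
 *	ethernet@0 {
 *		local-mac-address = [02 00 00 12 34 56];
 *	};
 *
 * Only when no platform-provided address is found does the driver fall
 * back to i40e_get_mac_addr(), which reads the address stored by firmware.
 */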
10857 * i40e_probe - Device initialization routine
10858 * @pdev: PCI device information struct
10859 * @ent: entry in i40e_pci_tbl
10861 * i40e_probe initializes a PF identified by a pci_dev structure.
10862 * The OS initialization, configuring of the PF private structure,
10863 * and a hardware reset occur.
10865 * Returns 0 on success, negative on failure
10867 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10869 struct i40e_aq_get_phy_abilities_resp abilities;
10870 struct i40e_pf *pf;
10871 struct i40e_hw *hw;
10872 static u16 pfs_found;
10880 err = pci_enable_device_mem(pdev);
10884 /* set up for high or low dma */
10885 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10887 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10889 dev_err(&pdev->dev,
10890 "DMA configuration failed: 0x%x\n", err);
10895 /* set up pci connections */
10896 err = pci_request_mem_regions(pdev, i40e_driver_name);
10898 dev_info(&pdev->dev,
10899 "pci_request_selected_regions failed %d\n", err);
10903 pci_enable_pcie_error_reporting(pdev);
10904 pci_set_master(pdev);
10906 /* Now that we have a PCI connection, we need to do the
10907 * low level device setup. This is primarily setting up
10908 * the Admin Queue structures and then querying for the
10909 * device's current profile information.
10911 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10918 set_bit(__I40E_DOWN, &pf->state);
10923 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10924 I40E_MAX_CSR_SPACE);
10926 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10927 if (!hw->hw_addr) {
10929 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10930 (unsigned int)pci_resource_start(pdev, 0),
10931 pf->ioremap_len, err);
10934 hw->vendor_id = pdev->vendor;
10935 hw->device_id = pdev->device;
10936 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10937 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10938 hw->subsystem_device_id = pdev->subsystem_device;
10939 hw->bus.device = PCI_SLOT(pdev->devfn);
10940 hw->bus.func = PCI_FUNC(pdev->devfn);
10941 hw->bus.bus_id = pdev->bus->number;
10942 pf->instance = pfs_found;
10944 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
10945 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
10947 /* set up the locks for the AQ, do this only once in probe
10948 * and destroy them only once in remove
10950 mutex_init(&hw->aq.asq_mutex);
10951 mutex_init(&hw->aq.arq_mutex);
10953 pf->msg_enable = netif_msg_init(debug,
10958 pf->hw.debug_mask = debug;
10960 /* do a special CORER for clearing PXE mode once at init */
10961 if (hw->revision_id == 0 &&
10962 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10963 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10968 i40e_clear_pxe_mode(hw);
10971 /* Reset here to make sure all is clean and to define PF 'n' */
10973 err = i40e_pf_reset(hw);
10975 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10980 hw->aq.num_arq_entries = I40E_AQ_LEN;
10981 hw->aq.num_asq_entries = I40E_AQ_LEN;
10982 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10983 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10984 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10986 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10988 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10990 err = i40e_init_shared_code(hw);
10992 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10997 /* set up a default setting for link flow control */
10998 pf->hw.fc.requested_mode = I40E_FC_NONE;
11000 err = i40e_init_adminq(hw);
11002 if (err == I40E_ERR_FIRMWARE_API_VERSION)
11003 dev_info(&pdev->dev,
11004 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
11006 dev_info(&pdev->dev,
11007 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
11012 /* provide nvm, fw, api versions */
11013 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
11014 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
11015 hw->aq.api_maj_ver, hw->aq.api_min_ver,
11016 i40e_nvm_version_str(hw));
11018 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
11019 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
11020 dev_info(&pdev->dev,
11021 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
11022 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
11023 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
11024 dev_info(&pdev->dev,
11025 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
11027 i40e_verify_eeprom(pf);
11029 /* Rev 0 hardware was never productized */
11030 if (hw->revision_id < 1)
11031 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
11033 i40e_clear_pxe_mode(hw);
11034 err = i40e_get_capabilities(pf);
11036 goto err_adminq_setup;
11038 err = i40e_sw_init(pf);
11040 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
11044 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
11045 hw->func_caps.num_rx_qp, 0, 0);
11047 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
11048 goto err_init_lan_hmc;
11051 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
11053 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
11055 goto err_configure_lan_hmc;
11058 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
11059 * Ignore error return codes because if it was already disabled via
11060 * hardware settings this will fail
11062 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
11063 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
11064 i40e_aq_stop_lldp(hw, true, NULL);
11067 /* allow a platform config to override the HW addr */
11068 i40e_get_platform_mac_addr(pdev, pf);
11070 if (!is_valid_ether_addr(hw->mac.addr)) {
11071 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
11075 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
11076 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
11077 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
11078 if (is_valid_ether_addr(hw->mac.port_addr))
11079 pf->flags |= I40E_FLAG_PORT_ID_VALID;
11081 pci_set_drvdata(pdev, pf);
11082 pci_save_state(pdev);
11083 #ifdef CONFIG_I40E_DCB
11084 err = i40e_init_pf_dcb(pf);
11086 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
11087 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
11088 /* Continue without DCB enabled */
11090 #endif /* CONFIG_I40E_DCB */
11092 /* set up periodic task facility */
11093 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
11094 pf->service_timer_period = HZ;
11096 INIT_WORK(&pf->service_task, i40e_service_task);
11097 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
11098 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
11100 /* NVM bit on means WoL disabled for the port */
11101 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
11102 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
11103 pf->wol_en = false;
11106 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11108 /* set up the main switch operations */
11109 i40e_determine_queue_usage(pf);
11110 err = i40e_init_interrupt_scheme(pf);
11112 goto err_switch_setup;
11114 /* The number of VSIs reported by the FW is the minimum guaranteed
11115 * to us; HW supports far more and we share the remaining pool with
11116 * the other PFs. We allocate space for more than the guarantee with
11117 * the understanding that we might not get them all later.
11119 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11120 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11122 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11124 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11125 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11129 goto err_switch_setup;
11132 #ifdef CONFIG_PCI_IOV
11133 /* prep for VF support */
11134 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11135 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11136 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11137 if (pci_num_vf(pdev))
11138 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11141 err = i40e_setup_pf_switch(pf, false);
11143 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11147 /* Make sure flow control is set according to current settings */
11148 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11149 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11150 dev_dbg(&pf->pdev->dev,
11151 "Set fc with err %s aq_err %s on get_phy_cap\n",
11152 i40e_stat_str(hw, err),
11153 i40e_aq_str(hw, hw->aq.asq_last_status));
11154 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11155 dev_dbg(&pf->pdev->dev,
11156 "Set fc with err %s aq_err %s on set_phy_config\n",
11157 i40e_stat_str(hw, err),
11158 i40e_aq_str(hw, hw->aq.asq_last_status));
11159 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11160 dev_dbg(&pf->pdev->dev,
11161 "Set fc with err %s aq_err %s on get_link_info\n",
11162 i40e_stat_str(hw, err),
11163 i40e_aq_str(hw, hw->aq.asq_last_status));
11165 /* if FDIR VSI was set up, start it now */
11166 for (i = 0; i < pf->num_alloc_vsi; i++) {
11167 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11168 i40e_vsi_open(pf->vsi[i]);
11173 /* The driver only wants link up/down and module qualification
11174 * reports from firmware. Note the negative logic.
11176 err = i40e_aq_set_phy_int_mask(&pf->hw,
11177 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11178 I40E_AQ_EVENT_MEDIA_NA |
11179 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11181 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11182 i40e_stat_str(&pf->hw, err),
11183 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
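/* Note added for clarity (an interpretation, not in the original source):
 * in the PHY interrupt mask a bit that is set suppresses the corresponding
 * event, so passing the complement of the three wanted bits masks
 * everything else off and leaves only link up/down, media-not-available
 * and module-qualification-failure reports enabled.
 */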
11185 /* Reconfigure hardware for allowing smaller MSS in the case
11186 * of TSO, so that we avoid the MDD being fired and causing
11187 * a reset in the case of small MSS+TSO.
11189 val = rd32(hw, I40E_REG_MSS);
11190 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11191 val &= ~I40E_REG_MSS_MIN_MASK;
11192 val |= I40E_64BYTE_MSS;
11193 wr32(hw, I40E_REG_MSS, val);
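/* Note added for clarity (an interpretation, not in the original source):
 * only the minimum-MSS field of the register is rewritten here; lowering
 * it to 64 bytes keeps a legitimate small-MSS TSO request from being
 * classified as a malicious-driver event and forcing a reset.
 */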
11196 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
11198 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11200 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11201 i40e_stat_str(&pf->hw, err),
11202 i40e_aq_str(&pf->hw,
11203 pf->hw.aq.asq_last_status));
11205 /* The main driver is (mostly) up and happy. We need to set this state
11206 * before setting up the misc vector or we get a race and the vector
11207 * ends up disabled forever.
11209 clear_bit(__I40E_DOWN, &pf->state);
11211 /* In case of MSI-X we are going to set up the misc vector right here
11212 * to handle admin queue events etc. In case of legacy and MSI
11213 * the misc functionality and queue processing are combined in
11214 * the same vector and that gets set up at open.
11216 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11217 err = i40e_setup_misc_vector(pf);
11219 dev_info(&pdev->dev,
11220 "setup of misc vector failed: %d\n", err);
11225 #ifdef CONFIG_PCI_IOV
11226 /* prep for VF support */
11227 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11228 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11229 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11230 /* disable link interrupts for VFs */
11231 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11232 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11233 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11236 if (pci_num_vf(pdev)) {
11237 dev_info(&pdev->dev,
11238 "Active VFs found, allocating resources.\n");
11239 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11241 dev_info(&pdev->dev,
11242 "Error %d allocating resources for existing VFs\n",
11246 #endif /* CONFIG_PCI_IOV */
11248 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11249 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11250 pf->num_iwarp_msix,
11251 I40E_IWARP_IRQ_PILE_ID);
11252 if (pf->iwarp_base_vector < 0) {
11253 dev_info(&pdev->dev,
11254 "failed to get tracking for %d vectors for IWARP err=%d\n",
11255 pf->num_iwarp_msix, pf->iwarp_base_vector);
11256 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11260 i40e_dbg_pf_init(pf);
11262 /* tell the firmware that we're starting */
11263 i40e_send_version(pf);
11265 /* since everything's happy, start the service_task timer */
11266 mod_timer(&pf->service_timer,
11267 round_jiffies(jiffies + pf->service_timer_period));
11269 /* add this PF to client device list and launch a client service task */
11270 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11271 err = i40e_lan_add_device(pf);
11273 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11277 #define PCI_SPEED_SIZE 8
11278 #define PCI_WIDTH_SIZE 8
11279 /* Devices on the IOSF bus do not have this information
11280 * and will report PCI Gen 1 x 1 by default, so don't bother checking them.
11283 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11284 char speed[PCI_SPEED_SIZE] = "Unknown";
11285 char width[PCI_WIDTH_SIZE] = "Unknown";
11287 /* Get the negotiated link width and speed from PCI config space
11290 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11293 i40e_set_pci_config_data(hw, link_status);
11295 switch (hw->bus.speed) {
11296 case i40e_bus_speed_8000:
11297 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11298 case i40e_bus_speed_5000:
11299 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11300 case i40e_bus_speed_2500:
11301 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11305 switch (hw->bus.width) {
11306 case i40e_bus_width_pcie_x8:
11307 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11308 case i40e_bus_width_pcie_x4:
11309 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11310 case i40e_bus_width_pcie_x2:
11311 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11312 case i40e_bus_width_pcie_x1:
11313 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11318 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11321 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11322 hw->bus.speed < i40e_bus_speed_8000) {
11323 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11324 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
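/* Rough sizing note added for illustration (approximate figures, not from
 * the original source): a single 40GbE port already exceeds what a PCIe
 * Gen3 x4 link can carry (on the order of 31 Gb/s of raw bandwidth), which
 * is why anything narrower than x8 or slower than 8.0 GT/s triggers the
 * warnings above.
 */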
11328 /* get the requested speeds from the fw */
11329 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11331 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11332 i40e_stat_str(&pf->hw, err),
11333 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11334 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11336 /* get the supported phy types from the fw */
11337 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11339 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11340 i40e_stat_str(&pf->hw, err),
11341 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11343 /* Add a filter to drop all Flow control frames from any VSI from being
11344 * transmitted. By doing so we stop a malicious VF from sending out
11345 * PAUSE or PFC frames and potentially controlling traffic for other VFs/VMs.
11347 * The FW can still send Flow control frames if enabled.
11349 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11350 pf->main_vsi_seid);
11352 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11353 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11354 pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
11355 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
11356 pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;
11357 /* print a string summarizing features */
11358 i40e_print_features(pf);
11362 /* Unwind what we've done if something failed in the setup */
11364 set_bit(__I40E_DOWN, &pf->state);
11365 i40e_clear_interrupt_scheme(pf);
11368 i40e_reset_interrupt_capability(pf);
11369 del_timer_sync(&pf->service_timer);
11371 err_configure_lan_hmc:
11372 (void)i40e_shutdown_lan_hmc(hw);
11374 kfree(pf->qp_pile);
11378 iounmap(hw->hw_addr);
11382 pci_disable_pcie_error_reporting(pdev);
11383 pci_release_mem_regions(pdev);
11386 pci_disable_device(pdev);
11391 * i40e_remove - Device removal routine
11392 * @pdev: PCI device information struct
11394 * i40e_remove is called by the PCI subsystem to alert the driver
11395 * that it should release a PCI device. This could be caused by a
11396 * Hot-Plug event, or because the driver is going to be removed from memory.
11399 static void i40e_remove(struct pci_dev *pdev)
11401 struct i40e_pf *pf = pci_get_drvdata(pdev);
11402 struct i40e_hw *hw = &pf->hw;
11403 i40e_status ret_code;
11406 i40e_dbg_pf_exit(pf);
11410 /* Disable RSS in hw */
11411 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11412 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11414 /* no more scheduling of any task */
11415 set_bit(__I40E_SUSPENDED, &pf->state);
11416 set_bit(__I40E_DOWN, &pf->state);
11417 if (pf->service_timer.data)
11418 del_timer_sync(&pf->service_timer);
11419 if (pf->service_task.func)
11420 cancel_work_sync(&pf->service_task);
11422 /* Client close must be called explicitly here because the timer
11423 * has been stopped.
11425 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
11427 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11429 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11432 i40e_fdir_teardown(pf);
11434 /* If there is a switch structure or any orphans, remove them.
11435 * This will leave only the PF's VSI remaining.
11437 for (i = 0; i < I40E_MAX_VEB; i++) {
11441 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11442 pf->veb[i]->uplink_seid == 0)
11443 i40e_switch_branch_release(pf->veb[i]);
11446 /* Now we can shut down the PF's VSI, just before we kill the adminq and HMC.
11449 if (pf->vsi[pf->lan_vsi])
11450 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11452 /* remove attached clients */
11453 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11454 ret_code = i40e_lan_del_device(pf);
11456 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11460 /* shutdown and destroy the HMC */
11461 if (hw->hmc.hmc_obj) {
11462 ret_code = i40e_shutdown_lan_hmc(hw);
11464 dev_warn(&pdev->dev,
11465 "Failed to destroy the HMC resources: %d\n",
11469 /* shutdown the adminq */
11470 i40e_shutdown_adminq(hw);
11472 /* destroy the locks only once, here */
11473 mutex_destroy(&hw->aq.arq_mutex);
11474 mutex_destroy(&hw->aq.asq_mutex);
11476 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11477 i40e_clear_interrupt_scheme(pf);
11478 for (i = 0; i < pf->num_alloc_vsi; i++) {
11480 i40e_vsi_clear_rings(pf->vsi[i]);
11481 i40e_vsi_clear(pf->vsi[i]);
11486 for (i = 0; i < I40E_MAX_VEB; i++) {
11491 kfree(pf->qp_pile);
11494 iounmap(hw->hw_addr);
11496 pci_release_mem_regions(pdev);
11498 pci_disable_pcie_error_reporting(pdev);
11499 pci_disable_device(pdev);
11503 * i40e_pci_error_detected - warning that something funky happened in PCI land
11504 * @pdev: PCI device information struct
11506 * Called to warn that something happened and the error handling steps
11507 * are in progress. Allows the driver to quiesce things, be ready for
11510 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11511 enum pci_channel_state error)
11513 struct i40e_pf *pf = pci_get_drvdata(pdev);
11515 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11518 dev_info(&pdev->dev,
11519 "Cannot recover - error happened during device probe\n");
11520 return PCI_ERS_RESULT_DISCONNECT;
11523 /* shutdown all operations */
11524 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11526 i40e_prep_for_reset(pf, true);
11530 /* Request a slot reset */
11531 return PCI_ERS_RESULT_NEED_RESET;
11535 * i40e_pci_error_slot_reset - a PCI slot reset just happened
11536 * @pdev: PCI device information struct
11538 * Called to find if the driver can work with the device now that
11539 * the pci slot has been reset. If a basic connection seems good
11540 * (registers are readable and have sane content) then return a
11541 * happy little PCI_ERS_RESULT_xxx.
11543 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11545 struct i40e_pf *pf = pci_get_drvdata(pdev);
11546 pci_ers_result_t result;
11550 dev_dbg(&pdev->dev, "%s\n", __func__);
11551 if (pci_enable_device_mem(pdev)) {
11552 dev_info(&pdev->dev,
11553 "Cannot re-enable PCI device after reset.\n");
11554 result = PCI_ERS_RESULT_DISCONNECT;
11556 pci_set_master(pdev);
11557 pci_restore_state(pdev);
11558 pci_save_state(pdev);
11559 pci_wake_from_d3(pdev, false);
11561 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11563 result = PCI_ERS_RESULT_RECOVERED;
11565 result = PCI_ERS_RESULT_DISCONNECT;
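/* Note added for clarity (an interpretation, not in the original source):
 * after re-enabling the device and restoring its PCI state, the driver
 * samples a device register; a sane readback shows that MMIO access works
 * again and the slot reset is reported as recovered, otherwise
 * PCI_ERS_RESULT_DISCONNECT is returned.
 */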
11568 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11570 dev_info(&pdev->dev,
11571 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11573 /* non-fatal, continue */
11580 * i40e_pci_error_resume - restart operations after PCI error recovery
11581 * @pdev: PCI device information struct
11583 * Called to allow the driver to bring things back up after PCI error
11584 * and/or reset recovery has finished.
11586 static void i40e_pci_error_resume(struct pci_dev *pdev)
11588 struct i40e_pf *pf = pci_get_drvdata(pdev);
11590 dev_dbg(&pdev->dev, "%s\n", __func__);
11591 if (test_bit(__I40E_SUSPENDED, &pf->state))
11595 i40e_handle_reset_warning(pf, true);
11600 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
11601 * using the mac_address_write admin q function
11602 * @pf: pointer to i40e_pf struct
11604 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
11606 struct i40e_hw *hw = &pf->hw;
11611 /* Get current MAC address in case it's an LAA */
11612 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
11613 ether_addr_copy(mac_addr,
11614 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
11616 dev_err(&pf->pdev->dev,
11617 "Failed to retrieve MAC address; using default\n");
11618 ether_addr_copy(mac_addr, hw->mac.addr);
11621 /* The FW expects the mac address write cmd to first be called with
11622 * one of these flags before calling it again with the multicast enable flags.
11625 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
11627 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
11628 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
11630 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
11632 dev_err(&pf->pdev->dev,
11633 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
11637 flags = I40E_AQC_MC_MAG_EN
11638 | I40E_AQC_WOL_PRESERVE_ON_PFR
11639 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
11640 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
11642 dev_err(&pf->pdev->dev,
11643 "Failed to enable Multicast Magic Packet wake up\n");
11647 * i40e_shutdown - PCI callback for shutting down
11648 * @pdev: PCI device information struct
11650 static void i40e_shutdown(struct pci_dev *pdev)
11652 struct i40e_pf *pf = pci_get_drvdata(pdev);
11653 struct i40e_hw *hw = &pf->hw;
11655 set_bit(__I40E_SUSPENDED, &pf->state);
11656 set_bit(__I40E_DOWN, &pf->state);
11658 i40e_prep_for_reset(pf, true);
11661 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11662 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11664 del_timer_sync(&pf->service_timer);
11665 cancel_work_sync(&pf->service_task);
11666 i40e_fdir_teardown(pf);
11668 /* Client close must be called explicitly here because the timer
11669 * has been stopped.
11671 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
11673 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
11674 i40e_enable_mc_magic_wake(pf);
11677 i40e_prep_for_reset(pf, true);
11680 wr32(hw, I40E_PFPM_APM,
11681 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11682 wr32(hw, I40E_PFPM_WUFC,
11683 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11685 i40e_clear_interrupt_scheme(pf);
11687 if (system_state == SYSTEM_POWER_OFF) {
11688 pci_wake_from_d3(pdev, pf->wol_en);
11689 pci_set_power_state(pdev, PCI_D3hot);
11695 * i40e_suspend - PCI callback for moving to D3
11696 * @pdev: PCI device information struct
11698 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11700 struct i40e_pf *pf = pci_get_drvdata(pdev);
11701 struct i40e_hw *hw = &pf->hw;
11704 set_bit(__I40E_SUSPENDED, &pf->state);
11705 set_bit(__I40E_DOWN, &pf->state);
11707 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
11708 i40e_enable_mc_magic_wake(pf);
11711 i40e_prep_for_reset(pf, true);
11714 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11715 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11717 i40e_stop_misc_vector(pf);
11719 retval = pci_save_state(pdev);
11723 pci_wake_from_d3(pdev, pf->wol_en);
11724 pci_set_power_state(pdev, PCI_D3hot);
11730 * i40e_resume - PCI callback for waking up from D3
11731 * @pdev: PCI device information struct
11733 static int i40e_resume(struct pci_dev *pdev)
11735 struct i40e_pf *pf = pci_get_drvdata(pdev);
11738 pci_set_power_state(pdev, PCI_D0);
11739 pci_restore_state(pdev);
11740 /* pci_restore_state() clears dev->state_saved, so
11741 * call pci_save_state() again to restore it.
11743 pci_save_state(pdev);
11745 err = pci_enable_device_mem(pdev);
11747 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11750 pci_set_master(pdev);
11752 /* no wakeup events while running */
11753 pci_wake_from_d3(pdev, false);
11755 /* handling the reset will rebuild the device state */
11756 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11757 clear_bit(__I40E_DOWN, &pf->state);
11759 i40e_reset_and_rebuild(pf, false, true);
11767 static const struct pci_error_handlers i40e_err_handler = {
11768 .error_detected = i40e_pci_error_detected,
11769 .slot_reset = i40e_pci_error_slot_reset,
11770 .resume = i40e_pci_error_resume,
11773 static struct pci_driver i40e_driver = {
11774 .name = i40e_driver_name,
11775 .id_table = i40e_pci_tbl,
11776 .probe = i40e_probe,
11777 .remove = i40e_remove,
11779 .suspend = i40e_suspend,
11780 .resume = i40e_resume,
11782 .shutdown = i40e_shutdown,
11783 .err_handler = &i40e_err_handler,
11784 .sriov_configure = i40e_pci_sriov_configure,
11788 * i40e_init_module - Driver registration routine
11790 * i40e_init_module is the first routine called when the driver is
11791 * loaded. All it does is register with the PCI subsystem.
11793 static int __init i40e_init_module(void)
11795 pr_info("%s: %s - version %s\n", i40e_driver_name,
11796 i40e_driver_string, i40e_driver_version_str);
11797 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11799 /* we will see if single thread per module is enough for now,
11800 * it can't be any worse than using the system workqueue which
11801 * was already single threaded
11803 i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11806 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11811 return pci_register_driver(&i40e_driver);
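/* Note added for clarity (not in the original source): the workqueue is
 * allocated before pci_register_driver() so that i40e_probe() can already
 * queue service work on it; WQ_UNBOUND with max_active of 1 keeps it
 * effectively single threaded per module, as the comment above notes, and
 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure.
 */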
11813 module_init(i40e_init_module);
11816 * i40e_exit_module - Driver exit cleanup routine
11818 * i40e_exit_module is called just before the driver is removed from memory.
11821 static void __exit i40e_exit_module(void)
11823 pci_unregister_driver(&i40e_driver);
11824 destroy_workqueue(i40e_wq);
11827 module_exit(i40e_exit_module);