1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
28 #include "i40e_prototype.h"
29 #include "i40evf_client.h"
30 /* All i40evf tracepoints are defined by the include below, which must
31 * be included exactly once across the whole kernel with
32 * CREATE_TRACE_POINTS defined
34 #define CREATE_TRACE_POINTS
35 #include "i40e_trace.h"
37 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
38 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
39 static int i40evf_close(struct net_device *netdev);
41 char i40evf_driver_name[] = "i40evf";
42 static const char i40evf_driver_string[] =
43 "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
47 #define DRV_VERSION_MAJOR 2
48 #define DRV_VERSION_MINOR 1
49 #define DRV_VERSION_BUILD 14
50 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
51 __stringify(DRV_VERSION_MINOR) "." \
52 __stringify(DRV_VERSION_BUILD) \
54 const char i40evf_driver_version[] = DRV_VERSION;
55 static const char i40evf_copyright[] =
56 "Copyright (c) 2013 - 2015 Intel Corporation.";
58 /* i40evf_pci_tbl - PCI Device ID Table
60 * Wildcard entries (PCI_ANY_ID) should come last
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64 * Class, Class Mask, private data (not used) }
66 static const struct pci_device_id i40evf_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
70 /* required last entry */
74 MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
76 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
77 MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_VERSION);
81 static struct workqueue_struct *i40evf_wq;
84 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
85 * @hw: pointer to the HW structure
86 * @mem: ptr to mem struct to fill out
87 * @size: size of memory requested
88 * @alignment: what to align the allocation to
90 i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
91 struct i40e_dma_mem *mem,
92 u64 size, u32 alignment)
94 struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
97 return I40E_ERR_PARAM;
99 mem->size = ALIGN(size, alignment);
100 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
101 (dma_addr_t *)&mem->pa, GFP_KERNEL);
105 return I40E_ERR_NO_MEMORY;
109 * i40evf_free_dma_mem_d - OS specific memory free for shared code
110 * @hw: pointer to the HW structure
111 * @mem: ptr to mem struct to free
113 i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
115 struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
117 if (!mem || !mem->va)
118 return I40E_ERR_PARAM;
119 dma_free_coherent(&adapter->pdev->dev, mem->size,
120 mem->va, (dma_addr_t)mem->pa);
125 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
126 * @hw: pointer to the HW structure
127 * @mem: ptr to mem struct to fill out
128 * @size: size of memory requested
130 i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
131 struct i40e_virt_mem *mem, u32 size)
134 return I40E_ERR_PARAM;
137 mem->va = kzalloc(size, GFP_KERNEL);
142 return I40E_ERR_NO_MEMORY;
146 * i40evf_free_virt_mem_d - OS specific memory free for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to free
150 i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
151 struct i40e_virt_mem *mem)
154 return I40E_ERR_PARAM;
156 /* it's ok to kfree a NULL pointer */
163 * i40evf_debug_d - OS dependent version of debug printing
164 * @hw: pointer to the HW structure
165 * @mask: debug level mask
166 * @fmt_str: printf-type format description
168 void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
173 if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
176 va_start(argptr, fmt_str);
177 vsnprintf(buf, sizeof(buf), fmt_str, argptr);
180 /* the debug string is already formatted with a newline */
185 * i40evf_schedule_reset - Set the flags and schedule a reset event
186 * @adapter: board private structure
188 void i40evf_schedule_reset(struct i40evf_adapter *adapter)
190 if (!(adapter->flags &
191 (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
192 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
193 schedule_work(&adapter->reset_task);
198 * i40evf_tx_timeout - Respond to a Tx Hang
199 * @netdev: network interface device structure
201 static void i40evf_tx_timeout(struct net_device *netdev)
203 struct i40evf_adapter *adapter = netdev_priv(netdev);
205 adapter->tx_timeout_count++;
206 i40evf_schedule_reset(adapter);
210 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
211 * @adapter: board private structure
213 static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
215 struct i40e_hw *hw = &adapter->hw;
217 if (!adapter->msix_entries)
220 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
223 rd32(hw, I40E_VFGEN_RSTAT);
225 synchronize_irq(adapter->msix_entries[0].vector);
229 * i40evf_misc_irq_enable - Enable default interrupt generation settings
230 * @adapter: board private structure
232 static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
234 struct i40e_hw *hw = &adapter->hw;
236 wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
237 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
238 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
241 rd32(hw, I40E_VFGEN_RSTAT);
245 * i40evf_irq_disable - Mask off interrupt generation on the NIC
246 * @adapter: board private structure
248 static void i40evf_irq_disable(struct i40evf_adapter *adapter)
251 struct i40e_hw *hw = &adapter->hw;
253 if (!adapter->msix_entries)
256 for (i = 1; i < adapter->num_msix_vectors; i++) {
257 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
258 synchronize_irq(adapter->msix_entries[i].vector);
261 rd32(hw, I40E_VFGEN_RSTAT);
265 * i40evf_irq_enable_queues - Enable interrupt for specified queues
266 * @adapter: board private structure
267 * @mask: bitmap of queues to enable
269 void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
271 struct i40e_hw *hw = &adapter->hw;
274 for (i = 1; i < adapter->num_msix_vectors; i++) {
275 if (mask & BIT(i - 1)) {
276 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
277 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
278 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
279 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
285 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
286 * @adapter: board private structure
287 * @mask: bitmap of vectors to trigger
289 static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
291 struct i40e_hw *hw = &adapter->hw;
296 dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
297 dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
298 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
299 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
300 wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
302 for (i = 1; i < adapter->num_msix_vectors; i++) {
304 dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
305 dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
306 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
307 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
308 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
314 * i40evf_irq_enable - Enable default interrupt generation settings
315 * @adapter: board private structure
316 * @flush: boolean value whether to run rd32()
318 void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
320 struct i40e_hw *hw = &adapter->hw;
322 i40evf_misc_irq_enable(adapter);
323 i40evf_irq_enable_queues(adapter, ~0);
326 rd32(hw, I40E_VFGEN_RSTAT);
330 * i40evf_msix_aq - Interrupt handler for vector 0
331 * @irq: interrupt number
332 * @data: pointer to netdev
334 static irqreturn_t i40evf_msix_aq(int irq, void *data)
336 struct net_device *netdev = data;
337 struct i40evf_adapter *adapter = netdev_priv(netdev);
338 struct i40e_hw *hw = &adapter->hw;
341 /* handle non-queue interrupts, these reads clear the registers */
342 val = rd32(hw, I40E_VFINT_ICR01);
343 val = rd32(hw, I40E_VFINT_ICR0_ENA1);
345 val = rd32(hw, I40E_VFINT_DYN_CTL01) |
346 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
347 wr32(hw, I40E_VFINT_DYN_CTL01, val);
349 /* schedule work on the private workqueue */
350 schedule_work(&adapter->adminq_task);
356 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
357 * @irq: interrupt number
358 * @data: pointer to a q_vector
360 static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
362 struct i40e_q_vector *q_vector = data;
364 if (!q_vector->tx.ring && !q_vector->rx.ring)
367 napi_schedule_irqoff(&q_vector->napi);
373 * i40evf_map_vector_to_rxq - associate irqs with rx queues
374 * @adapter: board private structure
375 * @v_idx: interrupt number
376 * @r_idx: queue number
379 i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
381 struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
382 struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
383 struct i40e_hw *hw = &adapter->hw;
385 rx_ring->q_vector = q_vector;
386 rx_ring->next = q_vector->rx.ring;
387 rx_ring->vsi = &adapter->vsi;
388 q_vector->rx.ring = rx_ring;
389 q_vector->rx.count++;
390 q_vector->rx.latency_range = I40E_LOW_LATENCY;
391 q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
392 q_vector->ring_mask |= BIT(r_idx);
393 q_vector->itr_countdown = ITR_COUNTDOWN_START;
394 wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
398 * i40evf_map_vector_to_txq - associate irqs with tx queues
399 * @adapter: board private structure
400 * @v_idx: interrupt number
401 * @t_idx: queue number
404 i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
406 struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
407 struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
408 struct i40e_hw *hw = &adapter->hw;
410 tx_ring->q_vector = q_vector;
411 tx_ring->next = q_vector->tx.ring;
412 tx_ring->vsi = &adapter->vsi;
413 q_vector->tx.ring = tx_ring;
414 q_vector->tx.count++;
415 q_vector->tx.latency_range = I40E_LOW_LATENCY;
416 q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
417 q_vector->itr_countdown = ITR_COUNTDOWN_START;
418 q_vector->num_ringpairs++;
419 wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
423 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
424 * @adapter: board private structure to initialize
426 * This function maps descriptor rings to the queue-specific vectors
427 * we were allotted through the MSI-X enabling code. Ideally, we'd have
428 * one vector per ring/queue, but on a constrained vector budget, we
429 * group the rings as "efficiently" as possible. You would add new
430 * mapping configurations in here.
432 static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
436 int rxr_idx = 0, txr_idx = 0;
437 int rxr_remaining = adapter->num_active_queues;
438 int txr_remaining = adapter->num_active_queues;
443 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
445 /* The ideal configuration...
446 * We have enough vectors to map one per queue.
448 if (q_vectors >= (rxr_remaining * 2)) {
449 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
450 i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
452 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
453 i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
457 /* If we don't have enough vectors for a 1-to-1
458 * mapping, we'll have to group them so there are
459 * multiple queues per vector.
460 * Re-adjusting *qpv takes care of the remainder.
462 for (i = v_start; i < q_vectors; i++) {
463 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
464 for (j = 0; j < rqpv; j++) {
465 i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
470 for (i = v_start; i < q_vectors; i++) {
471 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
472 for (j = 0; j < tqpv; j++) {
473 i40evf_map_vector_to_txq(adapter, i, txr_idx);
480 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
485 #ifdef CONFIG_NET_POLL_CONTROLLER
487 * i40evf_netpoll - A Polling 'interrupt' handler
488 * @netdev: network interface device structure
490 * This is used by netconsole to send skbs without having to re-enable
491 * interrupts. It's not called while the normal interrupt routine is executing.
493 static void i40evf_netpoll(struct net_device *netdev)
495 struct i40evf_adapter *adapter = netdev_priv(netdev);
496 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
499 /* if interface is down do nothing */
500 if (test_bit(__I40E_DOWN, &adapter->vsi.state))
503 for (i = 0; i < q_vectors; i++)
504 i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
509 * i40evf_irq_affinity_notify - Callback for affinity changes
510 * @notify: context as to what irq was changed
511 * @mask: the new affinity mask
513 * This is a callback function used by the irq_set_affinity_notifier function
514 * so that we may register to receive changes to the irq affinity masks.
516 static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
517 const cpumask_t *mask)
519 struct i40e_q_vector *q_vector =
520 container_of(notify, struct i40e_q_vector, affinity_notify);
522 q_vector->affinity_mask = *mask;
526 * i40evf_irq_affinity_release - Callback for affinity notifier release
527 * @ref: internal core kernel usage
529 * This is a callback function used by the irq_set_affinity_notifier function
530 * to inform the current notification subscriber that they will no longer
531 * receive notifications.
533 static void i40evf_irq_affinity_release(struct kref *ref) {}
536 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
537 * @adapter: board private structure
539 * Allocates MSI-X vectors for tx and rx handling, and requests
540 * interrupts from the kernel.
543 i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
545 int vector, err, q_vectors;
546 int rx_int_idx = 0, tx_int_idx = 0;
549 i40evf_irq_disable(adapter);
550 /* Decrement for Other and TCP Timer vectors */
551 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
553 for (vector = 0; vector < q_vectors; vector++) {
554 struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
555 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
557 if (q_vector->tx.ring && q_vector->rx.ring) {
558 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
559 "i40evf-%s-%s-%d", basename,
560 "TxRx", rx_int_idx++);
562 } else if (q_vector->rx.ring) {
563 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
564 "i40evf-%s-%s-%d", basename,
566 } else if (q_vector->tx.ring) {
567 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
568 "i40evf-%s-%s-%d", basename,
571 /* skip this unused q_vector */
574 err = request_irq(irq_num,
575 i40evf_msix_clean_rings,
580 dev_info(&adapter->pdev->dev,
581 "Request_irq failed, error: %d\n", err);
582 goto free_queue_irqs;
584 /* register for affinity change notifications */
585 q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
586 q_vector->affinity_notify.release =
587 i40evf_irq_affinity_release;
588 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
589 /* assign the mask for this irq */
590 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
598 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
599 irq_set_affinity_notifier(irq_num, NULL);
600 irq_set_affinity_hint(irq_num, NULL);
601 free_irq(irq_num, &adapter->q_vectors[vector]);
607 * i40evf_request_misc_irq - Initialize MSI-X interrupts
608 * @adapter: board private structure
610 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
611 * vector is only for the admin queue, and stays active even when the netdev
614 static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
616 struct net_device *netdev = adapter->netdev;
619 snprintf(adapter->misc_vector_name,
620 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
621 dev_name(&adapter->pdev->dev));
622 err = request_irq(adapter->msix_entries[0].vector,
624 adapter->misc_vector_name, netdev);
626 dev_err(&adapter->pdev->dev,
627 "request_irq for %s failed: %d\n",
628 adapter->misc_vector_name, err);
629 free_irq(adapter->msix_entries[0].vector, netdev);
635 * i40evf_free_traffic_irqs - Free MSI-X interrupts
636 * @adapter: board private structure
638 * Frees all MSI-X vectors other than 0.
640 static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
642 int vector, irq_num, q_vectors;
644 if (!adapter->msix_entries)
647 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
649 for (vector = 0; vector < q_vectors; vector++) {
650 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
651 irq_set_affinity_notifier(irq_num, NULL);
652 irq_set_affinity_hint(irq_num, NULL);
653 free_irq(irq_num, &adapter->q_vectors[vector]);
658 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
659 * @adapter: board private structure
661 * Frees MSI-X vector 0.
663 static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
665 struct net_device *netdev = adapter->netdev;
667 if (!adapter->msix_entries)
670 free_irq(adapter->msix_entries[0].vector, netdev);
674 * i40evf_configure_tx - Configure Transmit Unit after Reset
675 * @adapter: board private structure
677 * Configure the Tx unit of the MAC after a reset.
679 static void i40evf_configure_tx(struct i40evf_adapter *adapter)
681 struct i40e_hw *hw = &adapter->hw;
684 for (i = 0; i < adapter->num_active_queues; i++)
685 adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
689 * i40evf_configure_rx - Configure Receive Unit after Reset
690 * @adapter: board private structure
692 * Configure the Rx unit of the MAC after a reset.
694 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
696 unsigned int rx_buf_len = I40E_RXBUFFER_2048;
697 struct net_device *netdev = adapter->netdev;
698 struct i40e_hw *hw = &adapter->hw;
701 /* Legacy Rx will always default to a 2048 buffer size. */
702 #if (PAGE_SIZE < 8192)
703 if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
704 /* For jumbo frames on systems with 4K pages we have to use
705 * an order 1 page, so we might as well increase the size
706 * of our Rx buffer to make better use of the available space
708 rx_buf_len = I40E_RXBUFFER_3072;
710 /* We use a 1536 buffer size for configurations with
711 * standard Ethernet mtu. On x86 this gives us enough room
712 * for shared info and 192 bytes of padding.
714 if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
715 (netdev->mtu <= ETH_DATA_LEN))
716 rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
720 for (i = 0; i < adapter->num_active_queues; i++) {
721 adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
722 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
724 if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
725 clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
727 set_ring_build_skb_enabled(&adapter->rx_rings[i]);
732 * i40evf_find_vlan - Search filter list for specific vlan filter
733 * @adapter: board private structure
736 * Returns ptr to the filter object or NULL
739 i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
741 struct i40evf_vlan_filter *f;
743 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
751 * i40evf_add_vlan - Add a vlan filter to the list
752 * @adapter: board private structure
755 * Returns ptr to the filter object or NULL when no memory available.
758 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
760 struct i40evf_vlan_filter *f = NULL;
763 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
764 &adapter->crit_section)) {
770 f = i40evf_find_vlan(adapter, vlan);
772 f = kzalloc(sizeof(*f), GFP_ATOMIC);
778 INIT_LIST_HEAD(&f->list);
779 list_add(&f->list, &adapter->vlan_filter_list);
781 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
785 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
791 * i40evf_del_vlan - Remove a vlan filter from the list
792 * @adapter: board private structure
795 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
797 struct i40evf_vlan_filter *f;
800 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
801 &adapter->crit_section)) {
807 f = i40evf_find_vlan(adapter, vlan);
810 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
812 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
816 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
817 * @netdev: network device struct
820 static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
821 __always_unused __be16 proto, u16 vid)
823 struct i40evf_adapter *adapter = netdev_priv(netdev);
825 if (!VLAN_ALLOWED(adapter))
827 if (i40evf_add_vlan(adapter, vid) == NULL)
833 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
834 * @netdev: network device struct
837 static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
838 __always_unused __be16 proto, u16 vid)
840 struct i40evf_adapter *adapter = netdev_priv(netdev);
842 if (VLAN_ALLOWED(adapter)) {
843 i40evf_del_vlan(adapter, vid);
850 * i40evf_find_filter - Search filter list for specific mac filter
851 * @adapter: board private structure
852 * @macaddr: the MAC address
854 * Returns ptr to the filter object or NULL
857 i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
860 struct i40evf_mac_filter *f;
865 list_for_each_entry(f, &adapter->mac_filter_list, list) {
866 if (ether_addr_equal(macaddr, f->macaddr))
873 * i40e_add_filter - Add a mac filter to the filter list
874 * @adapter: board private structure
875 * @macaddr: the MAC address
877 * Returns ptr to the filter object or NULL when no memory available.
880 i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
883 struct i40evf_mac_filter *f;
889 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
890 &adapter->crit_section)) {
896 f = i40evf_find_filter(adapter, macaddr);
898 f = kzalloc(sizeof(*f), GFP_ATOMIC);
900 clear_bit(__I40EVF_IN_CRITICAL_TASK,
901 &adapter->crit_section);
905 ether_addr_copy(f->macaddr, macaddr);
907 list_add_tail(&f->list, &adapter->mac_filter_list);
909 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
912 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
917 * i40evf_set_mac - NDO callback to set port mac address
918 * @netdev: network interface device structure
919 * @p: pointer to an address structure
921 * Returns 0 on success, negative on failure
923 static int i40evf_set_mac(struct net_device *netdev, void *p)
925 struct i40evf_adapter *adapter = netdev_priv(netdev);
926 struct i40e_hw *hw = &adapter->hw;
927 struct i40evf_mac_filter *f;
928 struct sockaddr *addr = p;
930 if (!is_valid_ether_addr(addr->sa_data))
931 return -EADDRNOTAVAIL;
933 if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
936 if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
939 f = i40evf_find_filter(adapter, hw->mac.addr);
942 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
945 f = i40evf_add_filter(adapter, addr->sa_data);
947 ether_addr_copy(hw->mac.addr, addr->sa_data);
948 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
951 return (f == NULL) ? -ENOMEM : 0;
955 * i40evf_set_rx_mode - NDO callback to set the netdev filters
956 * @netdev: network interface device structure
958 static void i40evf_set_rx_mode(struct net_device *netdev)
960 struct i40evf_adapter *adapter = netdev_priv(netdev);
961 struct i40evf_mac_filter *f, *ftmp;
962 struct netdev_hw_addr *uca;
963 struct netdev_hw_addr *mca;
964 struct netdev_hw_addr *ha;
967 /* add addr if not already in the filter list */
968 netdev_for_each_uc_addr(uca, netdev) {
969 i40evf_add_filter(adapter, uca->addr);
971 netdev_for_each_mc_addr(mca, netdev) {
972 i40evf_add_filter(adapter, mca->addr);
975 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
976 &adapter->crit_section)) {
979 dev_err(&adapter->pdev->dev,
980 "Failed to get lock in %s\n", __func__);
984 /* remove filter if not in netdev list */
985 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
986 netdev_for_each_mc_addr(mca, netdev)
987 if (ether_addr_equal(mca->addr, f->macaddr))
988 goto bottom_of_search_loop;
990 netdev_for_each_uc_addr(uca, netdev)
991 if (ether_addr_equal(uca->addr, f->macaddr))
992 goto bottom_of_search_loop;
994 for_each_dev_addr(netdev, ha)
995 if (ether_addr_equal(ha->addr, f->macaddr))
996 goto bottom_of_search_loop;
998 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
999 goto bottom_of_search_loop;
1001 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1003 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
1005 bottom_of_search_loop:
1009 if (netdev->flags & IFF_PROMISC &&
1010 !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
1011 adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
1012 else if (!(netdev->flags & IFF_PROMISC) &&
1013 adapter->flags & I40EVF_FLAG_PROMISC_ON)
1014 adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
1016 if (netdev->flags & IFF_ALLMULTI &&
1017 !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
1018 adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
1019 else if (!(netdev->flags & IFF_ALLMULTI) &&
1020 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
1021 adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
1023 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1027 * i40evf_napi_enable_all - enable NAPI on all queue vectors
1028 * @adapter: board private structure
1030 static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
1033 struct i40e_q_vector *q_vector;
1034 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1036 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1037 struct napi_struct *napi;
1039 q_vector = &adapter->q_vectors[q_idx];
1040 napi = &q_vector->napi;
1046 * i40evf_napi_disable_all - disable NAPI on all queue vectors
1047 * @adapter: board private structure
1049 static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
1052 struct i40e_q_vector *q_vector;
1053 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1055 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1056 q_vector = &adapter->q_vectors[q_idx];
1057 napi_disable(&q_vector->napi);
1062 * i40evf_configure - set up transmit and receive data structures
1063 * @adapter: board private structure
1065 static void i40evf_configure(struct i40evf_adapter *adapter)
1067 struct net_device *netdev = adapter->netdev;
1070 i40evf_set_rx_mode(netdev);
1072 i40evf_configure_tx(adapter);
1073 i40evf_configure_rx(adapter);
1074 adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
1076 for (i = 0; i < adapter->num_active_queues; i++) {
1077 struct i40e_ring *ring = &adapter->rx_rings[i];
1079 i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
1084 * i40evf_up_complete - Finish the last steps of bringing up a connection
1085 * @adapter: board private structure
1087 static void i40evf_up_complete(struct i40evf_adapter *adapter)
1089 adapter->state = __I40EVF_RUNNING;
1090 clear_bit(__I40E_DOWN, &adapter->vsi.state);
1092 i40evf_napi_enable_all(adapter);
1094 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
1095 if (CLIENT_ENABLED(adapter))
1096 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
1097 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
1101 * i40e_down - Shutdown the connection processing
1102 * @adapter: board private structure
1104 void i40evf_down(struct i40evf_adapter *adapter)
1106 struct net_device *netdev = adapter->netdev;
1107 struct i40evf_mac_filter *f;
1109 if (adapter->state <= __I40EVF_DOWN_PENDING)
1112 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
1113 &adapter->crit_section))
1114 usleep_range(500, 1000);
1116 netif_carrier_off(netdev);
1117 netif_tx_disable(netdev);
1118 adapter->link_up = false;
1119 i40evf_napi_disable_all(adapter);
1120 i40evf_irq_disable(adapter);
1122 /* remove all MAC filters */
1123 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1126 /* remove all VLAN filters */
1127 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
1130 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
1131 adapter->state != __I40EVF_RESETTING) {
1132 /* cancel any current operation */
1133 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1134 /* Schedule operations to close down the HW. Don't wait
1135 * here for this to complete. The watchdog is still running
1136 * and it will take care of this.
1138 adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
1139 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
1140 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
1143 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1147 * i40evf_acquire_msix_vectors - Setup the MSIX capability
1148 * @adapter: board private structure
1149 * @vectors: number of vectors to request
1151 * Work with the OS to set up the MSIX vectors needed.
1153 * Returns 0 on success, negative on failure
1156 i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
1158 int err, vector_threshold;
1160 /* We'll want at least 3 (vector_threshold):
1161 * 0) Other (Admin Queue and link, mostly)
1165 vector_threshold = MIN_MSIX_COUNT;
1167 /* The more we get, the more we will assign to Tx/Rx Cleanup
1168 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1169 * Right now, we simply care about how many we'll get; we'll
1170 * set them up later while requesting irq's.
1172 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1173 vector_threshold, vectors);
1175 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1176 kfree(adapter->msix_entries);
1177 adapter->msix_entries = NULL;
1181 /* Adjust for only the vectors we'll use, which is minimum
1182 * of max_msix_q_vectors + NONQ_VECS, or the number of
1183 * vectors we were allocated.
1185 adapter->num_msix_vectors = err;
1190 * i40evf_free_queues - Free memory for all rings
1191 * @adapter: board private structure to initialize
1193 * Free all of the memory associated with queue pairs.
1195 static void i40evf_free_queues(struct i40evf_adapter *adapter)
1197 if (!adapter->vsi_res)
1199 kfree(adapter->tx_rings);
1200 adapter->tx_rings = NULL;
1201 kfree(adapter->rx_rings);
1202 adapter->rx_rings = NULL;
1206 * i40evf_alloc_queues - Allocate memory for all rings
1207 * @adapter: board private structure to initialize
1209 * We allocate one ring per queue at run-time since we don't know the
1210 * number of queues at compile-time. The polling_netdev array is
1211 * intended for Multiqueue, but should work fine with a single queue.
1213 static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1217 adapter->tx_rings = kcalloc(adapter->num_active_queues,
1218 sizeof(struct i40e_ring), GFP_KERNEL);
1219 if (!adapter->tx_rings)
1221 adapter->rx_rings = kcalloc(adapter->num_active_queues,
1222 sizeof(struct i40e_ring), GFP_KERNEL);
1223 if (!adapter->rx_rings)
1226 for (i = 0; i < adapter->num_active_queues; i++) {
1227 struct i40e_ring *tx_ring;
1228 struct i40e_ring *rx_ring;
1230 tx_ring = &adapter->tx_rings[i];
1232 tx_ring->queue_index = i;
1233 tx_ring->netdev = adapter->netdev;
1234 tx_ring->dev = &adapter->pdev->dev;
1235 tx_ring->count = adapter->tx_desc_count;
1236 tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
1237 if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
1238 tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
1240 rx_ring = &adapter->rx_rings[i];
1241 rx_ring->queue_index = i;
1242 rx_ring->netdev = adapter->netdev;
1243 rx_ring->dev = &adapter->pdev->dev;
1244 rx_ring->count = adapter->rx_desc_count;
1245 rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
1251 i40evf_free_queues(adapter);
1256 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
1257 * @adapter: board private structure to initialize
1259 * Attempt to configure the interrupts using the best available
1260 * capabilities of the hardware and the kernel.
1262 static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1264 int vector, v_budget;
1268 if (!adapter->vsi_res) {
1272 pairs = adapter->num_active_queues;
1274 /* It's easy to be greedy for MSI-X vectors, but it really
1275 * doesn't do us much good if we have a lot more vectors
1276 * than CPU's. So let's be conservative and only ask for
1277 * (roughly) twice the number of vectors as there are CPU's.
1279 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1280 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1282 adapter->msix_entries = kcalloc(v_budget,
1283 sizeof(struct msix_entry), GFP_KERNEL);
1284 if (!adapter->msix_entries) {
1289 for (vector = 0; vector < v_budget; vector++)
1290 adapter->msix_entries[vector].entry = vector;
1292 err = i40evf_acquire_msix_vectors(adapter, v_budget);
1295 netif_set_real_num_rx_queues(adapter->netdev, pairs);
1296 netif_set_real_num_tx_queues(adapter->netdev, pairs);
1301 * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
1302 * @adapter: board private structure
1304 * Return 0 on success, negative on failure
1306 static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
1308 struct i40e_aqc_get_set_rss_key_data *rss_key =
1309 (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
1310 struct i40e_hw *hw = &adapter->hw;
1313 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
1314 /* bail because we already have a command pending */
1315 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1316 adapter->current_op);
1320 ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1322 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1323 i40evf_stat_str(hw, ret),
1324 i40evf_aq_str(hw, hw->aq.asq_last_status));
1329 ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1330 adapter->rss_lut, adapter->rss_lut_size);
1332 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1333 i40evf_stat_str(hw, ret),
1334 i40evf_aq_str(hw, hw->aq.asq_last_status));
1342 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
1343 * @adapter: board private structure
1345 * Returns 0 on success, negative on failure
1347 static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
1349 struct i40e_hw *hw = &adapter->hw;
1353 dw = (u32 *)adapter->rss_key;
1354 for (i = 0; i <= adapter->rss_key_size / 4; i++)
1355 wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
1357 dw = (u32 *)adapter->rss_lut;
1358 for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1359 wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
1367 * i40evf_config_rss - Configure RSS keys and lut
1368 * @adapter: board private structure
1370 * Returns 0 on success, negative on failure
1372 int i40evf_config_rss(struct i40evf_adapter *adapter)
1375 if (RSS_PF(adapter)) {
1376 adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
1377 I40EVF_FLAG_AQ_SET_RSS_KEY;
1379 } else if (RSS_AQ(adapter)) {
1380 return i40evf_config_rss_aq(adapter);
1382 return i40evf_config_rss_reg(adapter);
1387 * i40evf_fill_rss_lut - Fill the lut with default values
1388 * @adapter: board private structure
1390 static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
1394 for (i = 0; i < adapter->rss_lut_size; i++)
1395 adapter->rss_lut[i] = i % adapter->num_active_queues;
1399 * i40evf_init_rss - Prepare for RSS
1400 * @adapter: board private structure
1402 * Return 0 on success, negative on failure
1404 static int i40evf_init_rss(struct i40evf_adapter *adapter)
1406 struct i40e_hw *hw = &adapter->hw;
1409 if (!RSS_PF(adapter)) {
1410 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1411 if (adapter->vf_res->vf_offload_flags &
1412 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1413 adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
1415 adapter->hena = I40E_DEFAULT_RSS_HENA;
1417 wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
1418 wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1421 i40evf_fill_rss_lut(adapter);
1423 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1424 ret = i40evf_config_rss(adapter);
1430 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1431 * @adapter: board private structure to initialize
1433 * We allocate one q_vector per queue interrupt. If allocation fails we
1436 static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1438 int q_idx = 0, num_q_vectors;
1439 struct i40e_q_vector *q_vector;
1441 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1442 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1444 if (!adapter->q_vectors)
1447 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1448 q_vector = &adapter->q_vectors[q_idx];
1449 q_vector->adapter = adapter;
1450 q_vector->vsi = &adapter->vsi;
1451 q_vector->v_idx = q_idx;
1452 netif_napi_add(adapter->netdev, &q_vector->napi,
1453 i40evf_napi_poll, NAPI_POLL_WEIGHT);
1460 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1461 * @adapter: board private structure to initialize
1463 * This function frees the memory allocated to the q_vectors. In addition if
1464 * NAPI is enabled it will delete any references to the NAPI struct prior
1465 * to freeing the q_vector.
1467 static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
1469 int q_idx, num_q_vectors;
1472 if (!adapter->q_vectors)
1475 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1476 napi_vectors = adapter->num_active_queues;
1478 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1479 struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
1480 if (q_idx < napi_vectors)
1481 netif_napi_del(&q_vector->napi);
1483 kfree(adapter->q_vectors);
1484 adapter->q_vectors = NULL;
1488 * i40evf_reset_interrupt_capability - Reset MSIX setup
1489 * @adapter: board private structure
1492 void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1494 if (!adapter->msix_entries)
1497 pci_disable_msix(adapter->pdev);
1498 kfree(adapter->msix_entries);
1499 adapter->msix_entries = NULL;
1503 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1504 * @adapter: board private structure to initialize
1507 int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
1512 err = i40evf_set_interrupt_capability(adapter);
1515 dev_err(&adapter->pdev->dev,
1516 "Unable to setup interrupt capabilities\n");
1517 goto err_set_interrupt;
1520 err = i40evf_alloc_q_vectors(adapter);
1522 dev_err(&adapter->pdev->dev,
1523 "Unable to allocate memory for queue vectors\n");
1524 goto err_alloc_q_vectors;
1527 err = i40evf_alloc_queues(adapter);
1529 dev_err(&adapter->pdev->dev,
1530 "Unable to allocate memory for queues\n");
1531 goto err_alloc_queues;
1534 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1535 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1536 adapter->num_active_queues);
1540 i40evf_free_q_vectors(adapter);
1541 err_alloc_q_vectors:
1542 i40evf_reset_interrupt_capability(adapter);
1548 * i40evf_free_rss - Free memory used by RSS structs
1549 * @adapter: board private structure
1551 static void i40evf_free_rss(struct i40evf_adapter *adapter)
1553 kfree(adapter->rss_key);
1554 adapter->rss_key = NULL;
1556 kfree(adapter->rss_lut);
1557 adapter->rss_lut = NULL;
1561 * i40evf_watchdog_timer - Periodic call-back timer
1562 * @data: pointer to adapter disguised as unsigned long
1564 static void i40evf_watchdog_timer(unsigned long data)
1566 struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
1568 schedule_work(&adapter->watchdog_task);
1569 /* timer will be rescheduled in watchdog task */
1573 * i40evf_watchdog_task - Periodic call-back task
1574 * @work: pointer to work_struct
1576 static void i40evf_watchdog_task(struct work_struct *work)
1578 struct i40evf_adapter *adapter = container_of(work,
1579 struct i40evf_adapter,
1581 struct i40e_hw *hw = &adapter->hw;
1584 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1585 goto restart_watchdog;
1587 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1588 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1589 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1590 if ((reg_val == I40E_VFR_VFACTIVE) ||
1591 (reg_val == I40E_VFR_COMPLETED)) {
1592 /* A chance for redemption! */
1593 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1594 adapter->state = __I40EVF_STARTUP;
1595 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1596 schedule_delayed_work(&adapter->init_task, 10);
1597 clear_bit(__I40EVF_IN_CRITICAL_TASK,
1598 &adapter->crit_section);
1599 /* Don't reschedule the watchdog, since we've restarted
1600 * the init task. When init_task contacts the PF and
1601 * gets everything set up again, it'll restart the
1602 * watchdog for us. Down, boy. Sit. Stay. Woof.
1606 adapter->aq_required = 0;
1607 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1611 if ((adapter->state < __I40EVF_DOWN) ||
1612 (adapter->flags & I40EVF_FLAG_RESET_PENDING))
1615 /* check for reset */
1616 reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
1617 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
1618 adapter->state = __I40EVF_RESETTING;
1619 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1620 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1621 schedule_work(&adapter->reset_task);
1622 adapter->aq_required = 0;
1623 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1627 /* Process admin queue tasks. After init, everything gets done
1628 * here so we don't race on the admin queue.
1630 if (adapter->current_op) {
1631 if (!i40evf_asq_done(hw)) {
1632 dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
1633 i40evf_send_api_ver(adapter);
1637 if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
1638 i40evf_send_vf_config_msg(adapter);
1642 if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
1643 i40evf_disable_queues(adapter);
1647 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
1648 i40evf_map_queues(adapter);
1652 if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
1653 i40evf_add_ether_addrs(adapter);
1657 if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
1658 i40evf_add_vlans(adapter);
1662 if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
1663 i40evf_del_ether_addrs(adapter);
1667 if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
1668 i40evf_del_vlans(adapter);
1672 if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
1673 i40evf_configure_queues(adapter);
1677 if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
1678 i40evf_enable_queues(adapter);
1682 if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
1683 /* This message goes straight to the firmware, not the
1684 * PF, so we don't have to set current_op as we will
1685 * not get a response through the ARQ.
1687 i40evf_init_rss(adapter);
1688 adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
1691 if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
1692 i40evf_get_hena(adapter);
1695 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
1696 i40evf_set_hena(adapter);
1699 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
1700 i40evf_set_rss_key(adapter);
1703 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
1704 i40evf_set_rss_lut(adapter);
1708 if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
1709 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
1710 I40E_FLAG_VF_MULTICAST_PROMISC);
1714 if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
1715 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
1719 if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
1720 (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1721 i40evf_set_promiscuous(adapter, 0);
1724 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1726 if (adapter->state == __I40EVF_RUNNING)
1727 i40evf_request_stats(adapter);
1729 if (adapter->state == __I40EVF_RUNNING) {
1730 i40evf_irq_enable_queues(adapter, ~0);
1731 i40evf_fire_sw_int(adapter, 0xFF);
1733 i40evf_fire_sw_int(adapter, 0x1);
1736 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1738 if (adapter->state == __I40EVF_REMOVE)
1740 if (adapter->aq_required)
1741 mod_timer(&adapter->watchdog_timer,
1742 jiffies + msecs_to_jiffies(20));
1744 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1745 schedule_work(&adapter->adminq_task);
1748 static void i40evf_disable_vf(struct i40evf_adapter *adapter)
1750 struct i40evf_mac_filter *f, *ftmp;
1751 struct i40evf_vlan_filter *fv, *fvtmp;
1753 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
1755 if (netif_running(adapter->netdev)) {
1756 set_bit(__I40E_DOWN, &adapter->vsi.state);
1757 netif_carrier_off(adapter->netdev);
1758 netif_tx_disable(adapter->netdev);
1759 adapter->link_up = false;
1760 i40evf_napi_disable_all(adapter);
1761 i40evf_irq_disable(adapter);
1762 i40evf_free_traffic_irqs(adapter);
1763 i40evf_free_all_tx_resources(adapter);
1764 i40evf_free_all_rx_resources(adapter);
1767 /* Delete all of the filters, both MAC and VLAN. */
1768 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
1773 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
1774 list_del(&fv->list);
1778 i40evf_free_misc_irq(adapter);
1779 i40evf_reset_interrupt_capability(adapter);
1780 i40evf_free_queues(adapter);
1781 i40evf_free_q_vectors(adapter);
1782 kfree(adapter->vf_res);
1783 i40evf_shutdown_adminq(&adapter->hw);
1784 adapter->netdev->flags &= ~IFF_UP;
1785 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1786 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1787 adapter->state = __I40EVF_DOWN;
1788 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
1791 #define I40EVF_RESET_WAIT_MS 10
1792 #define I40EVF_RESET_WAIT_COUNT 500
1794 * i40evf_reset_task - Call-back task to handle hardware reset
1795 * @work: pointer to work_struct
1797 * During reset we need to shut down and reinitialize the admin queue
1798 * before we can use it to communicate with the PF again. We also clear
1799 * and reinit the rings because that context is lost as well.
1801 static void i40evf_reset_task(struct work_struct *work)
1803 struct i40evf_adapter *adapter = container_of(work,
1804 struct i40evf_adapter,
1806 struct net_device *netdev = adapter->netdev;
1807 struct i40e_hw *hw = &adapter->hw;
1808 struct i40evf_vlan_filter *vlf;
1809 struct i40evf_mac_filter *f;
1813 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
1814 &adapter->crit_section))
1815 usleep_range(500, 1000);
1816 if (CLIENT_ENABLED(adapter)) {
1817 adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
1818 I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
1819 I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
1820 I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
1821 cancel_delayed_work_sync(&adapter->client_task);
1822 i40evf_notify_client_close(&adapter->vsi, true);
1824 i40evf_misc_irq_disable(adapter);
1825 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
1826 adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
1827 /* Restart the AQ here. If we have been reset but didn't
1828 * detect it, or if the PF had to reinit, our AQ will be hosed.
1830 i40evf_shutdown_adminq(hw);
1831 i40evf_init_adminq(hw);
1832 i40evf_request_reset(adapter);
1834 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1836 /* poll until we see the reset actually happen */
1837 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1838 reg_val = rd32(hw, I40E_VF_ARQLEN1) &
1839 I40E_VF_ARQLEN1_ARQENABLE_MASK;
1842 usleep_range(5000, 10000);
1844 if (i == I40EVF_RESET_WAIT_COUNT) {
1845 dev_info(&adapter->pdev->dev, "Never saw reset\n");
1846 goto continue_reset; /* act like the reset happened */
1849 /* wait until the reset is complete and the PF is responding to us */
1850 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1851 /* sleep first to make sure a minimum wait time is met */
1852 msleep(I40EVF_RESET_WAIT_MS);
1854 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1855 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1856 if (reg_val == I40E_VFR_VFACTIVE)
1860 pci_set_master(adapter->pdev);
1862 if (i == I40EVF_RESET_WAIT_COUNT) {
1863 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1865 i40evf_disable_vf(adapter);
1866 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1867 return; /* Do not attempt to reinit. It's dead, Jim. */
1871 if (netif_running(adapter->netdev)) {
1872 netif_carrier_off(netdev);
1873 netif_tx_stop_all_queues(netdev);
1874 adapter->link_up = false;
1875 i40evf_napi_disable_all(adapter);
1877 i40evf_irq_disable(adapter);
1879 adapter->state = __I40EVF_RESETTING;
1880 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1882 /* free the Tx/Rx rings and descriptors, might be better to just
1883 * re-use them sometime in the future
1885 i40evf_free_all_rx_resources(adapter);
1886 i40evf_free_all_tx_resources(adapter);
1888 /* kill and reinit the admin queue */
1889 i40evf_shutdown_adminq(hw);
1890 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1891 err = i40evf_init_adminq(hw);
1893 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
1896 adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
1897 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
1899 /* re-add all MAC filters */
1900 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1903 /* re-add all VLAN filters */
1904 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
1907 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
1908 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
1909 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1910 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1911 i40evf_misc_irq_enable(adapter);
1913 mod_timer(&adapter->watchdog_timer, jiffies + 2);
1915 if (netif_running(adapter->netdev)) {
1916 /* allocate transmit descriptors */
1917 err = i40evf_setup_all_tx_resources(adapter);
1921 /* allocate receive descriptors */
1922 err = i40evf_setup_all_rx_resources(adapter);
1926 i40evf_configure(adapter);
1928 i40evf_up_complete(adapter);
1930 i40evf_irq_enable(adapter, true);
1932 adapter->state = __I40EVF_DOWN;
1937 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
1938 i40evf_close(adapter->netdev);
1942 * i40evf_adminq_task - worker thread to clean the admin queue
1943 * @work: pointer to work_struct containing our data
1945 static void i40evf_adminq_task(struct work_struct *work)
1947 struct i40evf_adapter *adapter =
1948 container_of(work, struct i40evf_adapter, adminq_task);
1949 struct i40e_hw *hw = &adapter->hw;
1950 struct i40e_arq_event_info event;
1951 struct i40e_virtchnl_msg *v_msg;
1956 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1959 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1960 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1964 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1966 ret = i40evf_clean_arq_element(hw, &event, &pending);
1967 if (ret || !v_msg->v_opcode)
1968 break; /* No event to process or error cleaning ARQ */
1970 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
1971 v_msg->v_retval, event.msg_buf,
1974 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
1977 if ((adapter->flags &
1978 (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
1979 adapter->state == __I40EVF_RESETTING)
1982 /* check for error indications */
1983 val = rd32(hw, hw->aq.arq.len);
1984 if (val == 0xdeadbeef) /* indicates device in reset */
1987 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1988 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
1989 val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1991 if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1992 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
1993 val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1995 if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1996 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
1997 val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2000 wr32(hw, hw->aq.arq.len, val);
2002 val = rd32(hw, hw->aq.asq.len);
2004 if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2005 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2006 val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2008 if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2009 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2010 val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2012 if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2013 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2014 val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2017 wr32(hw, hw->aq.asq.len, val);
2020 kfree(event.msg_buf);
2022 /* re-enable Admin queue interrupt cause */
2023 i40evf_misc_irq_enable(adapter);
2027 * i40evf_client_task - worker thread to perform client work
2028 * @work: pointer to work_struct containing our data
2030 * This task handles client interactions. Because client calls can be
2031 * reentrant, we can't handle them in the watchdog.
2033 static void i40evf_client_task(struct work_struct *work)
2035 struct i40evf_adapter *adapter =
2036 container_of(work, struct i40evf_adapter, client_task.work);
2038 /* If we can't get the client bit, just give up. We'll be rescheduled
2042 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2045 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2046 i40evf_client_subtask(adapter);
2047 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2050 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2051 i40evf_notify_client_close(&adapter->vsi, false);
2052 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2055 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2056 i40evf_notify_client_open(&adapter->vsi);
2057 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2060 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2061 i40evf_notify_client_l2_params(&adapter->vsi);
2062 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2065 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2069 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2070 * @adapter: board private structure
2072 * Free all transmit software resources
2074 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2078 if (!adapter->tx_rings)
2081 for (i = 0; i < adapter->num_active_queues; i++)
2082 if (adapter->tx_rings[i].desc)
2083 i40evf_free_tx_resources(&adapter->tx_rings[i]);
2087 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2088 * @adapter: board private structure
2090 * If this function returns with an error, then it's possible one or
2091 * more of the rings is populated (while the rest are not). It is the
2092 * callers duty to clean those orphaned rings.
2094 * Return 0 on success, negative on failure
2096 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2100 for (i = 0; i < adapter->num_active_queues; i++) {
2101 adapter->tx_rings[i].count = adapter->tx_desc_count;
2102 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2105 dev_err(&adapter->pdev->dev,
2106 "Allocation for Tx Queue %u failed\n", i);
2114 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2115 * @adapter: board private structure
2117 * If this function returns with an error, then it's possible one or
2118 * more of the rings is populated (while the rest are not). It is the
2119 * callers duty to clean those orphaned rings.
2121 * Return 0 on success, negative on failure
2123 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2127 for (i = 0; i < adapter->num_active_queues; i++) {
2128 adapter->rx_rings[i].count = adapter->rx_desc_count;
2129 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2132 dev_err(&adapter->pdev->dev,
2133 "Allocation for Rx Queue %u failed\n", i);
2140 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2141 * @adapter: board private structure
2143 * Free all receive software resources
2145 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2149 if (!adapter->rx_rings)
2152 for (i = 0; i < adapter->num_active_queues; i++)
2153 if (adapter->rx_rings[i].desc)
2154 i40evf_free_rx_resources(&adapter->rx_rings[i]);
2158 * i40evf_open - Called when a network interface is made active
2159 * @netdev: network interface device structure
2161 * Returns 0 on success, negative value on failure
2163 * The open entry point is called when a network interface is made
2164 * active by the system (IFF_UP). At this point all resources needed
2165 * for transmit and receive operations are allocated, the interrupt
2166 * handler is registered with the OS, the watchdog timer is started,
2167 * and the stack is notified that the interface is ready.
2169 static int i40evf_open(struct net_device *netdev)
2171 struct i40evf_adapter *adapter = netdev_priv(netdev);
2174 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2175 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2179 if (adapter->state != __I40EVF_DOWN)
2182 /* allocate transmit descriptors */
2183 err = i40evf_setup_all_tx_resources(adapter);
2187 /* allocate receive descriptors */
2188 err = i40evf_setup_all_rx_resources(adapter);
2192 /* clear any pending interrupts, may auto mask */
2193 err = i40evf_request_traffic_irqs(adapter, netdev->name);
2197 i40evf_add_filter(adapter, adapter->hw.mac.addr);
2198 i40evf_configure(adapter);
2200 i40evf_up_complete(adapter);
2202 i40evf_irq_enable(adapter, true);
2207 i40evf_down(adapter);
2208 i40evf_free_traffic_irqs(adapter);
2210 i40evf_free_all_rx_resources(adapter);
2212 i40evf_free_all_tx_resources(adapter);
2218 * i40evf_close - Disables a network interface
2219 * @netdev: network interface device structure
2221 * Returns 0, this is not allowed to fail
2223 * The close entry point is called when an interface is de-activated
2224 * by the OS. The hardware is still under the drivers control, but
2225 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2226 * are freed, along with all transmit and receive resources.
2228 static int i40evf_close(struct net_device *netdev)
2230 struct i40evf_adapter *adapter = netdev_priv(netdev);
2232 if (adapter->state <= __I40EVF_DOWN_PENDING)
2236 set_bit(__I40E_DOWN, &adapter->vsi.state);
2237 if (CLIENT_ENABLED(adapter))
2238 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2240 i40evf_down(adapter);
2241 adapter->state = __I40EVF_DOWN_PENDING;
2242 i40evf_free_traffic_irqs(adapter);
2244 /* We explicitly don't free resources here because the hardware is
2245 * still active and can DMA into memory. Resources are cleared in
2246 * i40evf_virtchnl_completion() after we get confirmation from the PF
2247 * driver that the rings have been stopped.
2253 * i40evf_change_mtu - Change the Maximum Transfer Unit
2254 * @netdev: network interface device structure
2255 * @new_mtu: new value for maximum frame size
2257 * Returns 0 on success, negative on failure
2259 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2261 struct i40evf_adapter *adapter = netdev_priv(netdev);
2263 netdev->mtu = new_mtu;
2264 if (CLIENT_ENABLED(adapter)) {
2265 i40evf_notify_client_l2_params(&adapter->vsi);
2266 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2268 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2269 schedule_work(&adapter->reset_task);
2275 * i40evf_features_check - Validate encapsulated packet conforms to limits
2277 * @netdev: This physical port's netdev
2278 * @features: Offload features that the stack believes apply
2280 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
2281 struct net_device *dev,
2282 netdev_features_t features)
2286 /* No point in doing any of this if neither checksum nor GSO are
2287 * being requested for this frame. We can rule out both by just
2288 * checking for CHECKSUM_PARTIAL
2290 if (skb->ip_summed != CHECKSUM_PARTIAL)
2293 /* We cannot support GSO if the MSS is going to be less than
2294 * 64 bytes. If it is then we need to drop support for GSO.
2296 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
2297 features &= ~NETIF_F_GSO_MASK;
2299 /* MACLEN can support at most 63 words */
2300 len = skb_network_header(skb) - skb->data;
2301 if (len & ~(63 * 2))
2304 /* IPLEN and EIPLEN can support at most 127 dwords */
2305 len = skb_transport_header(skb) - skb_network_header(skb);
2306 if (len & ~(127 * 4))
2309 if (skb->encapsulation) {
2310 /* L4TUNLEN can support 127 words */
2311 len = skb_inner_network_header(skb) - skb_transport_header(skb);
2312 if (len & ~(127 * 2))
2315 /* IPLEN can support at most 127 dwords */
2316 len = skb_inner_transport_header(skb) -
2317 skb_inner_network_header(skb);
2318 if (len & ~(127 * 4))
2322 /* No need to validate L4LEN as TCP is the only protocol with a
2323 * a flexible value and we support all possible values supported
2324 * by TCP, which is at most 15 dwords
2329 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2332 #define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
2333 NETIF_F_HW_VLAN_CTAG_RX |\
2334 NETIF_F_HW_VLAN_CTAG_FILTER)
2337 * i40evf_fix_features - fix up the netdev feature bits
2338 * @netdev: our net device
2339 * @features: desired feature bits
2341 * Returns fixed-up features bits
2343 static netdev_features_t i40evf_fix_features(struct net_device *netdev,
2344 netdev_features_t features)
2346 struct i40evf_adapter *adapter = netdev_priv(netdev);
2348 features &= ~I40EVF_VLAN_FEATURES;
2349 if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
2350 features |= I40EVF_VLAN_FEATURES;
2354 static const struct net_device_ops i40evf_netdev_ops = {
2355 .ndo_open = i40evf_open,
2356 .ndo_stop = i40evf_close,
2357 .ndo_start_xmit = i40evf_xmit_frame,
2358 .ndo_set_rx_mode = i40evf_set_rx_mode,
2359 .ndo_validate_addr = eth_validate_addr,
2360 .ndo_set_mac_address = i40evf_set_mac,
2361 .ndo_change_mtu = i40evf_change_mtu,
2362 .ndo_tx_timeout = i40evf_tx_timeout,
2363 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
2364 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
2365 .ndo_features_check = i40evf_features_check,
2366 .ndo_fix_features = i40evf_fix_features,
2367 #ifdef CONFIG_NET_POLL_CONTROLLER
2368 .ndo_poll_controller = i40evf_netpoll,
2373 * i40evf_check_reset_complete - check that VF reset is complete
2374 * @hw: pointer to hw struct
2376 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2378 static int i40evf_check_reset_complete(struct i40e_hw *hw)
2383 for (i = 0; i < 100; i++) {
2384 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
2385 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2386 if ((rstat == I40E_VFR_VFACTIVE) ||
2387 (rstat == I40E_VFR_COMPLETED))
2389 usleep_range(10, 20);
2395 * i40evf_process_config - Process the config information we got from the PF
2396 * @adapter: board private structure
2398 * Verify that we have a valid config struct, and set up our netdev features
2399 * and our VSI struct.
2401 int i40evf_process_config(struct i40evf_adapter *adapter)
2403 struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
2404 struct net_device *netdev = adapter->netdev;
2405 struct i40e_vsi *vsi = &adapter->vsi;
2407 netdev_features_t hw_enc_features;
2408 netdev_features_t hw_features;
2410 /* got VF config message back from PF, now we can parse it */
2411 for (i = 0; i < vfres->num_vsis; i++) {
2412 if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
2413 adapter->vsi_res = &vfres->vsi_res[i];
2415 if (!adapter->vsi_res) {
2416 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2420 hw_enc_features = NETIF_F_SG |
2424 NETIF_F_SOFT_FEATURES |
2433 /* advertise to stack only if offloads for encapsulated packets is
2436 if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
2437 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
2439 NETIF_F_GSO_GRE_CSUM |
2440 NETIF_F_GSO_IPXIP4 |
2441 NETIF_F_GSO_IPXIP6 |
2442 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2443 NETIF_F_GSO_PARTIAL |
2446 if (!(vfres->vf_offload_flags &
2447 I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2448 netdev->gso_partial_features |=
2449 NETIF_F_GSO_UDP_TUNNEL_CSUM;
2451 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2452 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2453 netdev->hw_enc_features |= hw_enc_features;
2455 /* record features VLANs can make use of */
2456 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
2458 /* Write features and hw_features separately to avoid polluting
2459 * with, or dropping, features that are set when we registered.
2461 hw_features = hw_enc_features;
2463 netdev->hw_features |= hw_features;
2465 netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
2467 adapter->vsi.id = adapter->vsi_res->vsi_id;
2469 adapter->vsi.back = adapter;
2470 adapter->vsi.base_vector = 1;
2471 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2472 vsi->netdev = adapter->netdev;
2473 vsi->qs_handle = adapter->vsi_res->qset_handle;
2474 if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2475 adapter->rss_key_size = vfres->rss_key_size;
2476 adapter->rss_lut_size = vfres->rss_lut_size;
2478 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
2479 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
2486 * i40evf_init_task - worker thread to perform delayed initialization
2487 * @work: pointer to work_struct containing our data
2489 * This task completes the work that was begun in probe. Due to the nature
2490 * of VF-PF communications, we may need to wait tens of milliseconds to get
2491 * responses back from the PF. Rather than busy-wait in probe and bog down the
2492 * whole system, we'll do it in a task so we can sleep.
2493 * This task only runs during driver init. Once we've established
2494 * communications with the PF driver and set up our netdev, the watchdog
2497 static void i40evf_init_task(struct work_struct *work)
2499 struct i40evf_adapter *adapter = container_of(work,
2500 struct i40evf_adapter,
2502 struct net_device *netdev = adapter->netdev;
2503 struct i40e_hw *hw = &adapter->hw;
2504 struct pci_dev *pdev = adapter->pdev;
2507 switch (adapter->state) {
2508 case __I40EVF_STARTUP:
2509 /* driver loaded, probe complete */
2510 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
2511 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
2512 err = i40e_set_mac_type(hw);
2514 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
2518 err = i40evf_check_reset_complete(hw);
2520 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2524 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
2525 hw->aq.num_asq_entries = I40EVF_AQ_LEN;
2526 hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2527 hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2529 err = i40evf_init_adminq(hw);
2531 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2535 err = i40evf_send_api_ver(adapter);
2537 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
2538 i40evf_shutdown_adminq(hw);
2541 adapter->state = __I40EVF_INIT_VERSION_CHECK;
2543 case __I40EVF_INIT_VERSION_CHECK:
2544 if (!i40evf_asq_done(hw)) {
2545 dev_err(&pdev->dev, "Admin queue command never completed\n");
2546 i40evf_shutdown_adminq(hw);
2547 adapter->state = __I40EVF_STARTUP;
2551 /* aq msg sent, awaiting reply */
2552 err = i40evf_verify_api_ver(adapter);
2554 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
2555 err = i40evf_send_api_ver(adapter);
2557 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2558 adapter->pf_version.major,
2559 adapter->pf_version.minor,
2560 I40E_VIRTCHNL_VERSION_MAJOR,
2561 I40E_VIRTCHNL_VERSION_MINOR);
2564 err = i40evf_send_vf_config_msg(adapter);
2566 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2570 adapter->state = __I40EVF_INIT_GET_RESOURCES;
2572 case __I40EVF_INIT_GET_RESOURCES:
2573 /* aq msg sent, awaiting reply */
2574 if (!adapter->vf_res) {
2575 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
2577 sizeof(struct i40e_virtchnl_vsi_resource));
2578 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
2579 if (!adapter->vf_res)
2582 err = i40evf_get_vf_config(adapter);
2583 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2584 err = i40evf_send_vf_config_msg(adapter);
2586 } else if (err == I40E_ERR_PARAM) {
2587 /* We only get ERR_PARAM if the device is in a very bad
2588 * state or if we've been disabled for previous bad
2589 * behavior. Either way, we're done now.
2591 i40evf_shutdown_adminq(hw);
2592 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2596 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
2600 adapter->state = __I40EVF_INIT_SW;
2606 if (i40evf_process_config(adapter))
2608 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
2610 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
2612 netdev->netdev_ops = &i40evf_netdev_ops;
2613 i40evf_set_ethtool_ops(netdev);
2614 netdev->watchdog_timeo = 5 * HZ;
2616 /* MTU range: 68 - 9710 */
2617 netdev->min_mtu = ETH_MIN_MTU;
2618 netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
2620 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2621 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2622 adapter->hw.mac.addr);
2623 eth_hw_addr_random(netdev);
2624 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2626 adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
2627 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2628 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2631 init_timer(&adapter->watchdog_timer);
2632 adapter->watchdog_timer.function = &i40evf_watchdog_timer;
2633 adapter->watchdog_timer.data = (unsigned long)adapter;
2634 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2636 adapter->num_active_queues = min_t(int,
2637 adapter->vsi_res->num_queue_pairs,
2638 (int)(num_online_cpus()));
2639 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
2640 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
2641 err = i40evf_init_interrupt_scheme(adapter);
2644 i40evf_map_rings_to_vectors(adapter);
2645 if (adapter->vf_res->vf_offload_flags &
2646 I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2647 adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
2649 err = i40evf_request_misc_irq(adapter);
2653 netif_carrier_off(netdev);
2654 adapter->link_up = false;
2656 if (!adapter->netdev_registered) {
2657 err = register_netdev(netdev);
2662 adapter->netdev_registered = true;
2664 netif_tx_stop_all_queues(netdev);
2665 if (CLIENT_ALLOWED(adapter)) {
2666 err = i40evf_lan_add_device(adapter);
2668 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2672 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2673 if (netdev->features & NETIF_F_GRO)
2674 dev_info(&pdev->dev, "GRO is enabled\n");
2676 adapter->state = __I40EVF_DOWN;
2677 set_bit(__I40E_DOWN, &adapter->vsi.state);
2678 i40evf_misc_irq_enable(adapter);
2680 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2681 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2682 if (!adapter->rss_key || !adapter->rss_lut)
2685 if (RSS_AQ(adapter)) {
2686 adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
2687 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
2689 i40evf_init_rss(adapter);
2693 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
2696 i40evf_free_rss(adapter);
2698 i40evf_free_misc_irq(adapter);
2700 i40evf_reset_interrupt_capability(adapter);
2702 kfree(adapter->vf_res);
2703 adapter->vf_res = NULL;
2705 /* Things went into the weeds, so try again later */
2706 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2707 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
2708 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2709 i40evf_shutdown_adminq(hw);
2710 adapter->state = __I40EVF_STARTUP;
2711 schedule_delayed_work(&adapter->init_task, HZ * 5);
2714 schedule_delayed_work(&adapter->init_task, HZ);
2718 * i40evf_shutdown - Shutdown the device in preparation for a reboot
2719 * @pdev: pci device structure
2721 static void i40evf_shutdown(struct pci_dev *pdev)
2723 struct net_device *netdev = pci_get_drvdata(pdev);
2724 struct i40evf_adapter *adapter = netdev_priv(netdev);
2726 netif_device_detach(netdev);
2728 if (netif_running(netdev))
2729 i40evf_close(netdev);
2731 /* Prevent the watchdog from running. */
2732 adapter->state = __I40EVF_REMOVE;
2733 adapter->aq_required = 0;
2736 pci_save_state(pdev);
2739 pci_disable_device(pdev);
2743 * i40evf_probe - Device Initialization Routine
2744 * @pdev: PCI device information struct
2745 * @ent: entry in i40evf_pci_tbl
2747 * Returns 0 on success, negative on failure
2749 * i40evf_probe initializes an adapter identified by a pci_dev structure.
2750 * The OS initialization, configuring of the adapter private structure,
2751 * and a hardware reset occur.
2753 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2755 struct net_device *netdev;
2756 struct i40evf_adapter *adapter = NULL;
2757 struct i40e_hw *hw = NULL;
2760 err = pci_enable_device(pdev);
2764 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2766 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2769 "DMA configuration failed: 0x%x\n", err);
2774 err = pci_request_regions(pdev, i40evf_driver_name);
2777 "pci_request_regions failed 0x%x\n", err);
2781 pci_enable_pcie_error_reporting(pdev);
2783 pci_set_master(pdev);
2785 netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
2788 goto err_alloc_etherdev;
2791 SET_NETDEV_DEV(netdev, &pdev->dev);
2793 pci_set_drvdata(pdev, netdev);
2794 adapter = netdev_priv(netdev);
2796 adapter->netdev = netdev;
2797 adapter->pdev = pdev;
2802 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
2803 adapter->state = __I40EVF_STARTUP;
2805 /* Call save state here because it relies on the adapter struct. */
2806 pci_save_state(pdev);
2808 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
2809 pci_resource_len(pdev, 0));
2814 hw->vendor_id = pdev->vendor;
2815 hw->device_id = pdev->device;
2816 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2817 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2818 hw->subsystem_device_id = pdev->subsystem_device;
2819 hw->bus.device = PCI_SLOT(pdev->devfn);
2820 hw->bus.func = PCI_FUNC(pdev->devfn);
2821 hw->bus.bus_id = pdev->bus->number;
2823 /* set up the locks for the AQ, do this only once in probe
2824 * and destroy them only once in remove
2826 mutex_init(&hw->aq.asq_mutex);
2827 mutex_init(&hw->aq.arq_mutex);
2829 INIT_LIST_HEAD(&adapter->mac_filter_list);
2830 INIT_LIST_HEAD(&adapter->vlan_filter_list);
2832 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2833 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2834 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
2835 INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
2836 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
2837 schedule_delayed_work(&adapter->init_task,
2838 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
2843 free_netdev(netdev);
2845 pci_release_regions(pdev);
2848 pci_disable_device(pdev);
2854 * i40evf_suspend - Power management suspend routine
2855 * @pdev: PCI device information struct
2858 * Called when the system (VM) is entering sleep/suspend.
2860 static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
2862 struct net_device *netdev = pci_get_drvdata(pdev);
2863 struct i40evf_adapter *adapter = netdev_priv(netdev);
2866 netif_device_detach(netdev);
2868 if (netif_running(netdev)) {
2870 i40evf_down(adapter);
2873 i40evf_free_misc_irq(adapter);
2874 i40evf_reset_interrupt_capability(adapter);
2876 retval = pci_save_state(pdev);
2880 pci_disable_device(pdev);
2886 * i40evf_resume - Power management resume routine
2887 * @pdev: PCI device information struct
2889 * Called when the system (VM) is resumed from sleep/suspend.
2891 static int i40evf_resume(struct pci_dev *pdev)
2893 struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
2894 struct net_device *netdev = adapter->netdev;
2897 pci_set_power_state(pdev, PCI_D0);
2898 pci_restore_state(pdev);
2899 /* pci_restore_state clears dev->state_saved so call
2900 * pci_save_state to restore it.
2902 pci_save_state(pdev);
2904 err = pci_enable_device_mem(pdev);
2906 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
2909 pci_set_master(pdev);
2912 err = i40evf_set_interrupt_capability(adapter);
2915 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
2918 err = i40evf_request_misc_irq(adapter);
2921 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
2925 schedule_work(&adapter->reset_task);
2927 netif_device_attach(netdev);
2932 #endif /* CONFIG_PM */
2934 * i40evf_remove - Device Removal Routine
2935 * @pdev: PCI device information struct
2937 * i40evf_remove is called by the PCI subsystem to alert the driver
2938 * that it should release a PCI device. The could be caused by a
2939 * Hot-Plug event, or because the driver is going to be removed from
2942 static void i40evf_remove(struct pci_dev *pdev)
2944 struct net_device *netdev = pci_get_drvdata(pdev);
2945 struct i40evf_adapter *adapter = netdev_priv(netdev);
2946 struct i40evf_mac_filter *f, *ftmp;
2947 struct i40e_hw *hw = &adapter->hw;
2950 cancel_delayed_work_sync(&adapter->init_task);
2951 cancel_work_sync(&adapter->reset_task);
2952 cancel_delayed_work_sync(&adapter->client_task);
2953 if (adapter->netdev_registered) {
2954 unregister_netdev(netdev);
2955 adapter->netdev_registered = false;
2957 if (CLIENT_ALLOWED(adapter)) {
2958 err = i40evf_lan_del_device(adapter);
2960 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
2964 /* Shut down all the garbage mashers on the detention level */
2965 adapter->state = __I40EVF_REMOVE;
2966 adapter->aq_required = 0;
2967 i40evf_request_reset(adapter);
2969 /* If the FW isn't responding, kick it once, but only once. */
2970 if (!i40evf_asq_done(hw)) {
2971 i40evf_request_reset(adapter);
2974 i40evf_free_all_tx_resources(adapter);
2975 i40evf_free_all_rx_resources(adapter);
2976 i40evf_misc_irq_disable(adapter);
2977 i40evf_free_misc_irq(adapter);
2978 i40evf_reset_interrupt_capability(adapter);
2979 i40evf_free_q_vectors(adapter);
2981 if (adapter->watchdog_timer.function)
2982 del_timer_sync(&adapter->watchdog_timer);
2984 flush_scheduled_work();
2986 i40evf_free_rss(adapter);
2988 if (hw->aq.asq.count)
2989 i40evf_shutdown_adminq(hw);
2991 /* destroy the locks only once, here */
2992 mutex_destroy(&hw->aq.arq_mutex);
2993 mutex_destroy(&hw->aq.asq_mutex);
2995 iounmap(hw->hw_addr);
2996 pci_release_regions(pdev);
2997 i40evf_free_all_tx_resources(adapter);
2998 i40evf_free_all_rx_resources(adapter);
2999 i40evf_free_queues(adapter);
3000 kfree(adapter->vf_res);
3001 /* If we got removed before an up/down sequence, we've got a filter
3002 * hanging out there that we need to get rid of.
3004 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3008 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
3013 free_netdev(netdev);
3015 pci_disable_pcie_error_reporting(pdev);
3017 pci_disable_device(pdev);
3020 static struct pci_driver i40evf_driver = {
3021 .name = i40evf_driver_name,
3022 .id_table = i40evf_pci_tbl,
3023 .probe = i40evf_probe,
3024 .remove = i40evf_remove,
3026 .suspend = i40evf_suspend,
3027 .resume = i40evf_resume,
3029 .shutdown = i40evf_shutdown,
3033 * i40e_init_module - Driver Registration Routine
3035 * i40e_init_module is the first routine called when the driver is
3036 * loaded. All it does is register with the PCI subsystem.
3038 static int __init i40evf_init_module(void)
3042 pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3043 i40evf_driver_version);
3045 pr_info("%s\n", i40evf_copyright);
3047 i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3048 i40evf_driver_name);
3050 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3053 ret = pci_register_driver(&i40evf_driver);
3057 module_init(i40evf_init_module);
3060 * i40e_exit_module - Driver Exit Cleanup Routine
3062 * i40e_exit_module is called just before the driver is removed
3065 static void __exit i40evf_exit_module(void)
3067 pci_unregister_driver(&i40evf_driver);
3068 destroy_workqueue(i40evf_wq);
3071 module_exit(i40evf_exit_module);