]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/intel/i40evf/i40evf_main.c
i40e: remove unnecessary msleep() delay in i40e_free_vfs
[karo-tx-linux.git] / drivers / net / ethernet / intel / i40evf / i40evf_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4  * Copyright(c) 2013 - 2016 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 #include "i40evf.h"
28 #include "i40e_prototype.h"
29 #include "i40evf_client.h"
30 /* All i40evf tracepoints are defined by the include below, which must
31  * be included exactly once across the whole kernel with
32  * CREATE_TRACE_POINTS defined
33  */
34 #define CREATE_TRACE_POINTS
35 #include "i40e_trace.h"
36
37 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
38 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
39 static int i40evf_close(struct net_device *netdev);
40
/* Driver identity strings; i40evf_driver_name is non-static because it is
 * referenced from other translation units of this driver.
 */
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";

/* suffix marking an in-kernel (non out-of-tree) build */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
/* DRV_VERSION is assembled at preprocessing time as "major.minor.build-k" */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* private workqueue used by the driver's deferred work items */
static struct workqueue_struct *i40evf_wq;
82
83 /**
84  * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
85  * @hw:   pointer to the HW structure
86  * @mem:  ptr to mem struct to fill out
87  * @size: size of memory requested
88  * @alignment: what to align the allocation to
89  **/
90 i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
91                                       struct i40e_dma_mem *mem,
92                                       u64 size, u32 alignment)
93 {
94         struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
95
96         if (!mem)
97                 return I40E_ERR_PARAM;
98
99         mem->size = ALIGN(size, alignment);
100         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
101                                      (dma_addr_t *)&mem->pa, GFP_KERNEL);
102         if (mem->va)
103                 return 0;
104         else
105                 return I40E_ERR_NO_MEMORY;
106 }
107
108 /**
109  * i40evf_free_dma_mem_d - OS specific memory free for shared code
110  * @hw:   pointer to the HW structure
111  * @mem:  ptr to mem struct to free
112  **/
113 i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
114 {
115         struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
116
117         if (!mem || !mem->va)
118                 return I40E_ERR_PARAM;
119         dma_free_coherent(&adapter->pdev->dev, mem->size,
120                           mem->va, (dma_addr_t)mem->pa);
121         return 0;
122 }
123
124 /**
125  * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
126  * @hw:   pointer to the HW structure
127  * @mem:  ptr to mem struct to fill out
128  * @size: size of memory requested
129  **/
130 i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
131                                        struct i40e_virt_mem *mem, u32 size)
132 {
133         if (!mem)
134                 return I40E_ERR_PARAM;
135
136         mem->size = size;
137         mem->va = kzalloc(size, GFP_KERNEL);
138
139         if (mem->va)
140                 return 0;
141         else
142                 return I40E_ERR_NO_MEMORY;
143 }
144
145 /**
146  * i40evf_free_virt_mem_d - OS specific memory free for shared code
147  * @hw:   pointer to the HW structure
148  * @mem:  ptr to mem struct to free
149  **/
150 i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
151                                    struct i40e_virt_mem *mem)
152 {
153         if (!mem)
154                 return I40E_ERR_PARAM;
155
156         /* it's ok to kfree a NULL pointer */
157         kfree(mem->va);
158
159         return 0;
160 }
161
162 /**
163  * i40evf_debug_d - OS dependent version of debug printing
164  * @hw:  pointer to the HW structure
165  * @mask: debug level mask
166  * @fmt_str: printf-type format description
167  **/
168 void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
169 {
170         char buf[512];
171         va_list argptr;
172
173         if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
174                 return;
175
176         va_start(argptr, fmt_str);
177         vsnprintf(buf, sizeof(buf), fmt_str, argptr);
178         va_end(argptr);
179
180         /* the debug string is already formatted with a newline */
181         pr_info("%s", buf);
182 }
183
184 /**
185  * i40evf_schedule_reset - Set the flags and schedule a reset event
186  * @adapter: board private structure
187  **/
188 void i40evf_schedule_reset(struct i40evf_adapter *adapter)
189 {
190         if (!(adapter->flags &
191               (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
192                 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
193                 schedule_work(&adapter->reset_task);
194         }
195 }
196
197 /**
198  * i40evf_tx_timeout - Respond to a Tx Hang
199  * @netdev: network interface device structure
200  **/
201 static void i40evf_tx_timeout(struct net_device *netdev)
202 {
203         struct i40evf_adapter *adapter = netdev_priv(netdev);
204
205         adapter->tx_timeout_count++;
206         i40evf_schedule_reset(adapter);
207 }
208
209 /**
210  * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
211  * @adapter: board private structure
212  **/
213 static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
214 {
215         struct i40e_hw *hw = &adapter->hw;
216
217         if (!adapter->msix_entries)
218                 return;
219
220         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
221
222         /* read flush */
223         rd32(hw, I40E_VFGEN_RSTAT);
224
225         synchronize_irq(adapter->msix_entries[0].vector);
226 }
227
228 /**
229  * i40evf_misc_irq_enable - Enable default interrupt generation settings
230  * @adapter: board private structure
231  **/
232 static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
233 {
234         struct i40e_hw *hw = &adapter->hw;
235
236         wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
237                                        I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
238         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
239
240         /* read flush */
241         rd32(hw, I40E_VFGEN_RSTAT);
242 }
243
244 /**
245  * i40evf_irq_disable - Mask off interrupt generation on the NIC
246  * @adapter: board private structure
247  **/
248 static void i40evf_irq_disable(struct i40evf_adapter *adapter)
249 {
250         int i;
251         struct i40e_hw *hw = &adapter->hw;
252
253         if (!adapter->msix_entries)
254                 return;
255
256         for (i = 1; i < adapter->num_msix_vectors; i++) {
257                 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
258                 synchronize_irq(adapter->msix_entries[i].vector);
259         }
260         /* read flush */
261         rd32(hw, I40E_VFGEN_RSTAT);
262 }
263
264 /**
265  * i40evf_irq_enable_queues - Enable interrupt for specified queues
266  * @adapter: board private structure
267  * @mask: bitmap of queues to enable
268  **/
269 void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
270 {
271         struct i40e_hw *hw = &adapter->hw;
272         int i;
273
274         for (i = 1; i < adapter->num_msix_vectors; i++) {
275                 if (mask & BIT(i - 1)) {
276                         wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
277                              I40E_VFINT_DYN_CTLN1_INTENA_MASK |
278                              I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
279                              I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
280                 }
281         }
282 }
283
284 /**
285  * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
286  * @adapter: board private structure
287  * @mask: bitmap of vectors to trigger
288  **/
289 static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
290 {
291         struct i40e_hw *hw = &adapter->hw;
292         int i;
293         u32 dyn_ctl;
294
295         if (mask & 1) {
296                 dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
297                 dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
298                            I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
299                            I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
300                 wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
301         }
302         for (i = 1; i < adapter->num_msix_vectors; i++) {
303                 if (mask & BIT(i)) {
304                         dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
305                         dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
306                                    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
307                                    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
308                         wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
309                 }
310         }
311 }
312
313 /**
314  * i40evf_irq_enable - Enable default interrupt generation settings
315  * @adapter: board private structure
316  * @flush: boolean value whether to run rd32()
317  **/
318 void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
319 {
320         struct i40e_hw *hw = &adapter->hw;
321
322         i40evf_misc_irq_enable(adapter);
323         i40evf_irq_enable_queues(adapter, ~0);
324
325         if (flush)
326                 rd32(hw, I40E_VFGEN_RSTAT);
327 }
328
329 /**
330  * i40evf_msix_aq - Interrupt handler for vector 0
331  * @irq: interrupt number
332  * @data: pointer to netdev
333  **/
334 static irqreturn_t i40evf_msix_aq(int irq, void *data)
335 {
336         struct net_device *netdev = data;
337         struct i40evf_adapter *adapter = netdev_priv(netdev);
338         struct i40e_hw *hw = &adapter->hw;
339         u32 val;
340
341         /* handle non-queue interrupts, these reads clear the registers */
342         val = rd32(hw, I40E_VFINT_ICR01);
343         val = rd32(hw, I40E_VFINT_ICR0_ENA1);
344
345         val = rd32(hw, I40E_VFINT_DYN_CTL01) |
346               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
347         wr32(hw, I40E_VFINT_DYN_CTL01, val);
348
349         /* schedule work on the private workqueue */
350         schedule_work(&adapter->adminq_task);
351
352         return IRQ_HANDLED;
353 }
354
355 /**
356  * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
357  * @irq: interrupt number
358  * @data: pointer to a q_vector
359  **/
360 static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
361 {
362         struct i40e_q_vector *q_vector = data;
363
364         if (!q_vector->tx.ring && !q_vector->rx.ring)
365                 return IRQ_HANDLED;
366
367         napi_schedule_irqoff(&q_vector->napi);
368
369         return IRQ_HANDLED;
370 }
371
372 /**
373  * i40evf_map_vector_to_rxq - associate irqs with rx queues
374  * @adapter: board private structure
375  * @v_idx: interrupt number
376  * @r_idx: queue number
377  **/
378 static void
379 i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
380 {
381         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
382         struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
383         struct i40e_hw *hw = &adapter->hw;
384
385         rx_ring->q_vector = q_vector;
386         rx_ring->next = q_vector->rx.ring;
387         rx_ring->vsi = &adapter->vsi;
388         q_vector->rx.ring = rx_ring;
389         q_vector->rx.count++;
390         q_vector->rx.latency_range = I40E_LOW_LATENCY;
391         q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
392         q_vector->ring_mask |= BIT(r_idx);
393         q_vector->itr_countdown = ITR_COUNTDOWN_START;
394         wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
395 }
396
397 /**
398  * i40evf_map_vector_to_txq - associate irqs with tx queues
399  * @adapter: board private structure
400  * @v_idx: interrupt number
401  * @t_idx: queue number
402  **/
403 static void
404 i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
405 {
406         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
407         struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
408         struct i40e_hw *hw = &adapter->hw;
409
410         tx_ring->q_vector = q_vector;
411         tx_ring->next = q_vector->tx.ring;
412         tx_ring->vsi = &adapter->vsi;
413         q_vector->tx.ring = tx_ring;
414         q_vector->tx.count++;
415         q_vector->tx.latency_range = I40E_LOW_LATENCY;
416         q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
417         q_vector->itr_countdown = ITR_COUNTDOWN_START;
418         q_vector->num_ringpairs++;
419         wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
420 }
421
422 /**
423  * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
424  * @adapter: board private structure to initialize
425  *
426  * This function maps descriptor rings to the queue-specific vectors
427  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
428  * one vector per ring/queue, but on a constrained vector budget, we
429  * group the rings as "efficiently" as possible.  You would add new
430  * mapping configurations in here.
431  **/
432 static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
433 {
434         int q_vectors;
435         int v_start = 0;
436         int rxr_idx = 0, txr_idx = 0;
437         int rxr_remaining = adapter->num_active_queues;
438         int txr_remaining = adapter->num_active_queues;
439         int i, j;
440         int rqpv, tqpv;
441         int err = 0;
442
443         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
444
445         /* The ideal configuration...
446          * We have enough vectors to map one per queue.
447          */
448         if (q_vectors >= (rxr_remaining * 2)) {
449                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
450                         i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
451
452                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
453                         i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
454                 goto out;
455         }
456
457         /* If we don't have enough vectors for a 1-to-1
458          * mapping, we'll have to group them so there are
459          * multiple queues per vector.
460          * Re-adjusting *qpv takes care of the remainder.
461          */
462         for (i = v_start; i < q_vectors; i++) {
463                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
464                 for (j = 0; j < rqpv; j++) {
465                         i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
466                         rxr_idx++;
467                         rxr_remaining--;
468                 }
469         }
470         for (i = v_start; i < q_vectors; i++) {
471                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
472                 for (j = 0; j < tqpv; j++) {
473                         i40evf_map_vector_to_txq(adapter, i, txr_idx);
474                         txr_idx++;
475                         txr_remaining--;
476                 }
477         }
478
479 out:
480         adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
481
482         return err;
483 }
484
485 #ifdef CONFIG_NET_POLL_CONTROLLER
486 /**
487  * i40evf_netpoll - A Polling 'interrupt' handler
488  * @netdev: network interface device structure
489  *
490  * This is used by netconsole to send skbs without having to re-enable
491  * interrupts.  It's not called while the normal interrupt routine is executing.
492  **/
493 static void i40evf_netpoll(struct net_device *netdev)
494 {
495         struct i40evf_adapter *adapter = netdev_priv(netdev);
496         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
497         int i;
498
499         /* if interface is down do nothing */
500         if (test_bit(__I40E_DOWN, &adapter->vsi.state))
501                 return;
502
503         for (i = 0; i < q_vectors; i++)
504                 i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
505 }
506
507 #endif
508 /**
509  * i40evf_irq_affinity_notify - Callback for affinity changes
510  * @notify: context as to what irq was changed
511  * @mask: the new affinity mask
512  *
513  * This is a callback function used by the irq_set_affinity_notifier function
514  * so that we may register to receive changes to the irq affinity masks.
515  **/
516 static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
517                                        const cpumask_t *mask)
518 {
519         struct i40e_q_vector *q_vector =
520                 container_of(notify, struct i40e_q_vector, affinity_notify);
521
522         q_vector->affinity_mask = *mask;
523 }
524
/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.  Nothing needs to be torn down here, so the body
 * is intentionally empty.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}
534
535 /**
536  * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
537  * @adapter: board private structure
538  *
539  * Allocates MSI-X vectors for tx and rx handling, and requests
540  * interrupts from the kernel.
541  **/
542 static int
543 i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
544 {
545         int vector, err, q_vectors;
546         int rx_int_idx = 0, tx_int_idx = 0;
547         int irq_num;
548
549         i40evf_irq_disable(adapter);
550         /* Decrement for Other and TCP Timer vectors */
551         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
552
553         for (vector = 0; vector < q_vectors; vector++) {
554                 struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
555                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
556
557                 if (q_vector->tx.ring && q_vector->rx.ring) {
558                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
559                                  "i40evf-%s-%s-%d", basename,
560                                  "TxRx", rx_int_idx++);
561                         tx_int_idx++;
562                 } else if (q_vector->rx.ring) {
563                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
564                                  "i40evf-%s-%s-%d", basename,
565                                  "rx", rx_int_idx++);
566                 } else if (q_vector->tx.ring) {
567                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
568                                  "i40evf-%s-%s-%d", basename,
569                                  "tx", tx_int_idx++);
570                 } else {
571                         /* skip this unused q_vector */
572                         continue;
573                 }
574                 err = request_irq(irq_num,
575                                   i40evf_msix_clean_rings,
576                                   0,
577                                   q_vector->name,
578                                   q_vector);
579                 if (err) {
580                         dev_info(&adapter->pdev->dev,
581                                  "Request_irq failed, error: %d\n", err);
582                         goto free_queue_irqs;
583                 }
584                 /* register for affinity change notifications */
585                 q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
586                 q_vector->affinity_notify.release =
587                                                    i40evf_irq_affinity_release;
588                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
589                 /* assign the mask for this irq */
590                 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
591         }
592
593         return 0;
594
595 free_queue_irqs:
596         while (vector) {
597                 vector--;
598                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
599                 irq_set_affinity_notifier(irq_num, NULL);
600                 irq_set_affinity_hint(irq_num, NULL);
601                 free_irq(irq_num, &adapter->q_vectors[vector]);
602         }
603         return err;
604 }
605
606 /**
607  * i40evf_request_misc_irq - Initialize MSI-X interrupts
608  * @adapter: board private structure
609  *
610  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
611  * vector is only for the admin queue, and stays active even when the netdev
612  * is closed.
613  **/
614 static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
615 {
616         struct net_device *netdev = adapter->netdev;
617         int err;
618
619         snprintf(adapter->misc_vector_name,
620                  sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
621                  dev_name(&adapter->pdev->dev));
622         err = request_irq(adapter->msix_entries[0].vector,
623                           &i40evf_msix_aq, 0,
624                           adapter->misc_vector_name, netdev);
625         if (err) {
626                 dev_err(&adapter->pdev->dev,
627                         "request_irq for %s failed: %d\n",
628                         adapter->misc_vector_name, err);
629                 free_irq(adapter->msix_entries[0].vector, netdev);
630         }
631         return err;
632 }
633
634 /**
635  * i40evf_free_traffic_irqs - Free MSI-X interrupts
636  * @adapter: board private structure
637  *
638  * Frees all MSI-X vectors other than 0.
639  **/
640 static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
641 {
642         int vector, irq_num, q_vectors;
643
644         if (!adapter->msix_entries)
645                 return;
646
647         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
648
649         for (vector = 0; vector < q_vectors; vector++) {
650                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
651                 irq_set_affinity_notifier(irq_num, NULL);
652                 irq_set_affinity_hint(irq_num, NULL);
653                 free_irq(irq_num, &adapter->q_vectors[vector]);
654         }
655 }
656
657 /**
658  * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
659  * @adapter: board private structure
660  *
661  * Frees MSI-X vector 0.
662  **/
663 static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
664 {
665         struct net_device *netdev = adapter->netdev;
666
667         if (!adapter->msix_entries)
668                 return;
669
670         free_irq(adapter->msix_entries[0].vector, netdev);
671 }
672
673 /**
674  * i40evf_configure_tx - Configure Transmit Unit after Reset
675  * @adapter: board private structure
676  *
677  * Configure the Tx unit of the MAC after a reset.
678  **/
679 static void i40evf_configure_tx(struct i40evf_adapter *adapter)
680 {
681         struct i40e_hw *hw = &adapter->hw;
682         int i;
683
684         for (i = 0; i < adapter->num_active_queues; i++)
685                 adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
686 }
687
688 /**
689  * i40evf_configure_rx - Configure Receive Unit after Reset
690  * @adapter: board private structure
691  *
692  * Configure the Rx unit of the MAC after a reset.
693  **/
694 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
695 {
696         unsigned int rx_buf_len = I40E_RXBUFFER_2048;
697         struct net_device *netdev = adapter->netdev;
698         struct i40e_hw *hw = &adapter->hw;
699         int i;
700
701         /* Legacy Rx will always default to a 2048 buffer size. */
702 #if (PAGE_SIZE < 8192)
703         if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
704                 /* For jumbo frames on systems with 4K pages we have to use
705                  * an order 1 page, so we might as well increase the size
706                  * of our Rx buffer to make better use of the available space
707                  */
708                 rx_buf_len = I40E_RXBUFFER_3072;
709
710                 /* We use a 1536 buffer size for configurations with
711                  * standard Ethernet mtu.  On x86 this gives us enough room
712                  * for shared info and 192 bytes of padding.
713                  */
714                 if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
715                     (netdev->mtu <= ETH_DATA_LEN))
716                         rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
717         }
718 #endif
719
720         for (i = 0; i < adapter->num_active_queues; i++) {
721                 adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
722                 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
723
724                 if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
725                         clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
726                 else
727                         set_ring_build_skb_enabled(&adapter->rx_rings[i]);
728         }
729 }
730
731 /**
732  * i40evf_find_vlan - Search filter list for specific vlan filter
733  * @adapter: board private structure
734  * @vlan: vlan tag
735  *
736  * Returns ptr to the filter object or NULL
737  **/
738 static struct
739 i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
740 {
741         struct i40evf_vlan_filter *f;
742
743         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
744                 if (vlan == f->vlan)
745                         return f;
746         }
747         return NULL;
748 }
749
750 /**
751  * i40evf_add_vlan - Add a vlan filter to the list
752  * @adapter: board private structure
753  * @vlan: VLAN tag
754  *
755  * Returns ptr to the filter object or NULL when no memory available.
756  **/
757 static struct
758 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
759 {
760         struct i40evf_vlan_filter *f = NULL;
761         int count = 50;
762
763         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
764                                 &adapter->crit_section)) {
765                 udelay(1);
766                 if (--count == 0)
767                         goto out;
768         }
769
770         f = i40evf_find_vlan(adapter, vlan);
771         if (!f) {
772                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
773                 if (!f)
774                         goto clearout;
775
776                 f->vlan = vlan;
777
778                 INIT_LIST_HEAD(&f->list);
779                 list_add(&f->list, &adapter->vlan_filter_list);
780                 f->add = true;
781                 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
782         }
783
784 clearout:
785         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
786 out:
787         return f;
788 }
789
790 /**
791  * i40evf_del_vlan - Remove a vlan filter from the list
792  * @adapter: board private structure
793  * @vlan: VLAN tag
794  **/
795 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
796 {
797         struct i40evf_vlan_filter *f;
798         int count = 50;
799
800         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
801                                 &adapter->crit_section)) {
802                 udelay(1);
803                 if (--count == 0)
804                         return;
805         }
806
807         f = i40evf_find_vlan(adapter, vlan);
808         if (f) {
809                 f->remove = true;
810                 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
811         }
812         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
813 }
814
815 /**
816  * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
817  * @netdev: network device struct
818  * @vid: VLAN tag
819  **/
820 static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
821                                   __always_unused __be16 proto, u16 vid)
822 {
823         struct i40evf_adapter *adapter = netdev_priv(netdev);
824
825         if (!VLAN_ALLOWED(adapter))
826                 return -EIO;
827         if (i40evf_add_vlan(adapter, vid) == NULL)
828                 return -ENOMEM;
829         return 0;
830 }
831
832 /**
833  * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
834  * @netdev: network device struct
835  * @vid: VLAN tag
836  **/
837 static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
838                                    __always_unused __be16 proto, u16 vid)
839 {
840         struct i40evf_adapter *adapter = netdev_priv(netdev);
841
842         if (VLAN_ALLOWED(adapter)) {
843                 i40evf_del_vlan(adapter, vid);
844                 return 0;
845         }
846         return -EIO;
847 }
848
849 /**
850  * i40evf_find_filter - Search filter list for specific mac filter
851  * @adapter: board private structure
852  * @macaddr: the MAC address
853  *
854  * Returns ptr to the filter object or NULL
855  **/
856 static struct
857 i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
858                                       u8 *macaddr)
859 {
860         struct i40evf_mac_filter *f;
861
862         if (!macaddr)
863                 return NULL;
864
865         list_for_each_entry(f, &adapter->mac_filter_list, list) {
866                 if (ether_addr_equal(macaddr, f->macaddr))
867                         return f;
868         }
869         return NULL;
870 }
871
/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Looks up @macaddr in the adapter's MAC filter list; if it is not
 * present, allocates a new entry, appends it, and flags it so the
 * watchdog task later sends the ADD_MAC request to the PF.
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
                                     u8 *macaddr)
{
	struct i40evf_mac_filter *f;
	int count = 50; /* bounded spin so the caller is never wedged */

	if (!macaddr)
		return NULL;

	/* Busy-wait for the bit guarding the filter lists; give up and
	 * return NULL after ~50us since we may be in atomic context
	 * (e.g. called from ndo_set_rx_mode).
	 */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return NULL;
	}

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		/* GFP_ATOMIC: we hold the critical-section bit and may
		 * not sleep here
		 */
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f) {
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			return NULL;
		}

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		/* mark for addition; the watchdog sends the actual AQ cmd */
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return f;
}
915
/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* nothing to do if the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	/* the PF administratively assigned our MAC; we may not override it */
	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	/* flag the old hardware-address filter for removal.
	 * NOTE(review): this lookup and flagging run without the
	 * __I40EVF_IN_CRITICAL_TASK bit held -- confirm callers are
	 * serialized against other filter-list writers.
	 */
	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	/* queue the new address; commit it locally only on success */
	f = i40evf_add_filter(adapter, addr->sa_data);
	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}
953
/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Syncs the driver's MAC filter list with the netdev's unicast and
 * multicast address lists, and translates IFF_PROMISC/IFF_ALLMULTI
 * into AQ request flags. The watchdog task sends the actual commands.
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;
	int count = 50; /* bounded spin for the critical-section bit */

	/* add addr if not already in the filter list.
	 * i40evf_add_filter() takes the critical-section bit itself,
	 * so these calls run before we grab it below.
	 */
	netdev_for_each_uc_addr(uca, netdev) {
		i40evf_add_filter(adapter, uca->addr);
	}
	netdev_for_each_mc_addr(mca, netdev) {
		i40evf_add_filter(adapter, mca->addr);
	}

	/* this callback may run in atomic context, so bail out rather
	 * than sleep if the bit cannot be acquired quickly
	 */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0) {
			dev_err(&adapter->pdev->dev,
				"Failed to get lock in %s\n", __func__);
			return;
		}
	}
	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* never remove our own hardware address */
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
			goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;

bottom_of_search_loop:
		continue;
	}

	/* request/release promiscuous mode to match the netdev flag */
	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	/* likewise for all-multicast */
	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
1025
1026 /**
1027  * i40evf_napi_enable_all - enable NAPI on all queue vectors
1028  * @adapter: board private structure
1029  **/
1030 static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
1031 {
1032         int q_idx;
1033         struct i40e_q_vector *q_vector;
1034         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1035
1036         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1037                 struct napi_struct *napi;
1038
1039                 q_vector = &adapter->q_vectors[q_idx];
1040                 napi = &q_vector->napi;
1041                 napi_enable(napi);
1042         }
1043 }
1044
1045 /**
1046  * i40evf_napi_disable_all - disable NAPI on all queue vectors
1047  * @adapter: board private structure
1048  **/
1049 static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
1050 {
1051         int q_idx;
1052         struct i40e_q_vector *q_vector;
1053         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1054
1055         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1056                 q_vector = &adapter->q_vectors[q_idx];
1057                 napi_disable(&q_vector->napi);
1058         }
1059 }
1060
1061 /**
1062  * i40evf_configure - set up transmit and receive data structures
1063  * @adapter: board private structure
1064  **/
1065 static void i40evf_configure(struct i40evf_adapter *adapter)
1066 {
1067         struct net_device *netdev = adapter->netdev;
1068         int i;
1069
1070         i40evf_set_rx_mode(netdev);
1071
1072         i40evf_configure_tx(adapter);
1073         i40evf_configure_rx(adapter);
1074         adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
1075
1076         for (i = 0; i < adapter->num_active_queues; i++) {
1077                 struct i40e_ring *ring = &adapter->rx_rings[i];
1078
1079                 i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
1080         }
1081 }
1082
/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Marks the adapter running, enables NAPI, and flags the queue-enable
 * request; the watchdog task sends it to the PF.
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	/* state/DOWN bit are updated before NAPI is enabled so the poll
	 * path observes a running adapter
	 */
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_DOWN, &adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	/* ask the PF to enable our queues; handled by the watchdog task */
	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	/* kick the watchdog so the AQ request goes out promptly */
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
1099
1100 /**
1101  * i40e_down - Shutdown the connection processing
1102  * @adapter: board private structure
1103  **/
1104 void i40evf_down(struct i40evf_adapter *adapter)
1105 {
1106         struct net_device *netdev = adapter->netdev;
1107         struct i40evf_mac_filter *f;
1108
1109         if (adapter->state <= __I40EVF_DOWN_PENDING)
1110                 return;
1111
1112         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
1113                                 &adapter->crit_section))
1114                 usleep_range(500, 1000);
1115
1116         netif_carrier_off(netdev);
1117         netif_tx_disable(netdev);
1118         adapter->link_up = false;
1119         i40evf_napi_disable_all(adapter);
1120         i40evf_irq_disable(adapter);
1121
1122         /* remove all MAC filters */
1123         list_for_each_entry(f, &adapter->mac_filter_list, list) {
1124                 f->remove = true;
1125         }
1126         /* remove all VLAN filters */
1127         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
1128                 f->remove = true;
1129         }
1130         if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
1131             adapter->state != __I40EVF_RESETTING) {
1132                 /* cancel any current operation */
1133                 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1134                 /* Schedule operations to close down the HW. Don't wait
1135                  * here for this to complete. The watchdog is still running
1136                  * and it will take care of this.
1137                  */
1138                 adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
1139                 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
1140                 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
1141         }
1142
1143         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1144 }
1145
1146 /**
1147  * i40evf_acquire_msix_vectors - Setup the MSIX capability
1148  * @adapter: board private structure
1149  * @vectors: number of vectors to request
1150  *
1151  * Work with the OS to set up the MSIX vectors needed.
1152  *
1153  * Returns 0 on success, negative on failure
1154  **/
1155 static int
1156 i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
1157 {
1158         int err, vector_threshold;
1159
1160         /* We'll want at least 3 (vector_threshold):
1161          * 0) Other (Admin Queue and link, mostly)
1162          * 1) TxQ[0] Cleanup
1163          * 2) RxQ[0] Cleanup
1164          */
1165         vector_threshold = MIN_MSIX_COUNT;
1166
1167         /* The more we get, the more we will assign to Tx/Rx Cleanup
1168          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1169          * Right now, we simply care about how many we'll get; we'll
1170          * set them up later while requesting irq's.
1171          */
1172         err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1173                                     vector_threshold, vectors);
1174         if (err < 0) {
1175                 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1176                 kfree(adapter->msix_entries);
1177                 adapter->msix_entries = NULL;
1178                 return err;
1179         }
1180
1181         /* Adjust for only the vectors we'll use, which is minimum
1182          * of max_msix_q_vectors + NONQ_VECS, or the number of
1183          * vectors we were allocated.
1184          */
1185         adapter->num_msix_vectors = err;
1186         return 0;
1187 }
1188
1189 /**
1190  * i40evf_free_queues - Free memory for all rings
1191  * @adapter: board private structure to initialize
1192  *
1193  * Free all of the memory associated with queue pairs.
1194  **/
1195 static void i40evf_free_queues(struct i40evf_adapter *adapter)
1196 {
1197         if (!adapter->vsi_res)
1198                 return;
1199         kfree(adapter->tx_rings);
1200         adapter->tx_rings = NULL;
1201         kfree(adapter->rx_rings);
1202         adapter->rx_rings = NULL;
1203 }
1204
1205 /**
1206  * i40evf_alloc_queues - Allocate memory for all rings
1207  * @adapter: board private structure to initialize
1208  *
1209  * We allocate one ring per queue at run-time since we don't know the
1210  * number of queues at compile-time.  The polling_netdev array is
1211  * intended for Multiqueue, but should work fine with a single queue.
1212  **/
1213 static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1214 {
1215         int i;
1216
1217         adapter->tx_rings = kcalloc(adapter->num_active_queues,
1218                                     sizeof(struct i40e_ring), GFP_KERNEL);
1219         if (!adapter->tx_rings)
1220                 goto err_out;
1221         adapter->rx_rings = kcalloc(adapter->num_active_queues,
1222                                     sizeof(struct i40e_ring), GFP_KERNEL);
1223         if (!adapter->rx_rings)
1224                 goto err_out;
1225
1226         for (i = 0; i < adapter->num_active_queues; i++) {
1227                 struct i40e_ring *tx_ring;
1228                 struct i40e_ring *rx_ring;
1229
1230                 tx_ring = &adapter->tx_rings[i];
1231
1232                 tx_ring->queue_index = i;
1233                 tx_ring->netdev = adapter->netdev;
1234                 tx_ring->dev = &adapter->pdev->dev;
1235                 tx_ring->count = adapter->tx_desc_count;
1236                 tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
1237                 if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
1238                         tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
1239
1240                 rx_ring = &adapter->rx_rings[i];
1241                 rx_ring->queue_index = i;
1242                 rx_ring->netdev = adapter->netdev;
1243                 rx_ring->dev = &adapter->pdev->dev;
1244                 rx_ring->count = adapter->rx_desc_count;
1245                 rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
1246         }
1247
1248         return 0;
1249
1250 err_out:
1251         i40evf_free_queues(adapter);
1252         return -ENOMEM;
1253 }
1254
1255 /**
1256  * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
1257  * @adapter: board private structure to initialize
1258  *
1259  * Attempt to configure the interrupts using the best available
1260  * capabilities of the hardware and the kernel.
1261  **/
1262 static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1263 {
1264         int vector, v_budget;
1265         int pairs = 0;
1266         int err = 0;
1267
1268         if (!adapter->vsi_res) {
1269                 err = -EIO;
1270                 goto out;
1271         }
1272         pairs = adapter->num_active_queues;
1273
1274         /* It's easy to be greedy for MSI-X vectors, but it really
1275          * doesn't do us much good if we have a lot more vectors
1276          * than CPU's.  So let's be conservative and only ask for
1277          * (roughly) twice the number of vectors as there are CPU's.
1278          */
1279         v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1280         v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1281
1282         adapter->msix_entries = kcalloc(v_budget,
1283                                         sizeof(struct msix_entry), GFP_KERNEL);
1284         if (!adapter->msix_entries) {
1285                 err = -ENOMEM;
1286                 goto out;
1287         }
1288
1289         for (vector = 0; vector < v_budget; vector++)
1290                 adapter->msix_entries[vector].entry = vector;
1291
1292         err = i40evf_acquire_msix_vectors(adapter, v_budget);
1293
1294 out:
1295         netif_set_real_num_rx_queues(adapter->netdev, pairs);
1296         netif_set_real_num_tx_queues(adapter->netdev, pairs);
1297         return err;
1298 }
1299
1300 /**
1301  * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
1302  * @adapter: board private structure
1303  *
1304  * Return 0 on success, negative on failure
1305  **/
1306 static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
1307 {
1308         struct i40e_aqc_get_set_rss_key_data *rss_key =
1309                 (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
1310         struct i40e_hw *hw = &adapter->hw;
1311         int ret = 0;
1312
1313         if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
1314                 /* bail because we already have a command pending */
1315                 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1316                         adapter->current_op);
1317                 return -EBUSY;
1318         }
1319
1320         ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1321         if (ret) {
1322                 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1323                         i40evf_stat_str(hw, ret),
1324                         i40evf_aq_str(hw, hw->aq.asq_last_status));
1325                 return ret;
1326
1327         }
1328
1329         ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1330                                     adapter->rss_lut, adapter->rss_lut_size);
1331         if (ret) {
1332                 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1333                         i40evf_stat_str(hw, ret),
1334                         i40evf_aq_str(hw, hw->aq.asq_last_status));
1335         }
1336
1337         return ret;
1338
1339 }
1340
1341 /**
1342  * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
1343  * @adapter: board private structure
1344  *
1345  * Returns 0 on success, negative on failure
1346  **/
1347 static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
1348 {
1349         struct i40e_hw *hw = &adapter->hw;
1350         u32 *dw;
1351         u16 i;
1352
1353         dw = (u32 *)adapter->rss_key;
1354         for (i = 0; i <= adapter->rss_key_size / 4; i++)
1355                 wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
1356
1357         dw = (u32 *)adapter->rss_lut;
1358         for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1359                 wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
1360
1361         i40e_flush(hw);
1362
1363         return 0;
1364 }
1365
1366 /**
1367  * i40evf_config_rss - Configure RSS keys and lut
1368  * @adapter: board private structure
1369  *
1370  * Returns 0 on success, negative on failure
1371  **/
1372 int i40evf_config_rss(struct i40evf_adapter *adapter)
1373 {
1374
1375         if (RSS_PF(adapter)) {
1376                 adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
1377                                         I40EVF_FLAG_AQ_SET_RSS_KEY;
1378                 return 0;
1379         } else if (RSS_AQ(adapter)) {
1380                 return i40evf_config_rss_aq(adapter);
1381         } else {
1382                 return i40evf_config_rss_reg(adapter);
1383         }
1384 }
1385
1386 /**
1387  * i40evf_fill_rss_lut - Fill the lut with default values
1388  * @adapter: board private structure
1389  **/
1390 static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
1391 {
1392         u16 i;
1393
1394         for (i = 0; i < adapter->rss_lut_size; i++)
1395                 adapter->rss_lut[i] = i % adapter->num_active_queues;
1396 }
1397
1398 /**
1399  * i40evf_init_rss - Prepare for RSS
1400  * @adapter: board private structure
1401  *
1402  * Return 0 on success, negative on failure
1403  **/
1404 static int i40evf_init_rss(struct i40evf_adapter *adapter)
1405 {
1406         struct i40e_hw *hw = &adapter->hw;
1407         int ret;
1408
1409         if (!RSS_PF(adapter)) {
1410                 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1411                 if (adapter->vf_res->vf_offload_flags &
1412                     I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1413                         adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
1414                 else
1415                         adapter->hena = I40E_DEFAULT_RSS_HENA;
1416
1417                 wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
1418                 wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1419         }
1420
1421         i40evf_fill_rss_lut(adapter);
1422
1423         netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1424         ret = i40evf_config_rss(adapter);
1425
1426         return ret;
1427 }
1428
1429 /**
1430  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1431  * @adapter: board private structure to initialize
1432  *
1433  * We allocate one q_vector per queue interrupt.  If allocation fails we
1434  * return -ENOMEM.
1435  **/
1436 static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1437 {
1438         int q_idx = 0, num_q_vectors;
1439         struct i40e_q_vector *q_vector;
1440
1441         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1442         adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1443                                      GFP_KERNEL);
1444         if (!adapter->q_vectors)
1445                 return -ENOMEM;
1446
1447         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1448                 q_vector = &adapter->q_vectors[q_idx];
1449                 q_vector->adapter = adapter;
1450                 q_vector->vsi = &adapter->vsi;
1451                 q_vector->v_idx = q_idx;
1452                 netif_napi_add(adapter->netdev, &q_vector->napi,
1453                                i40evf_napi_poll, NAPI_POLL_WEIGHT);
1454         }
1455
1456         return 0;
1457 }
1458
1459 /**
1460  * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1461  * @adapter: board private structure to initialize
1462  *
1463  * This function frees the memory allocated to the q_vectors.  In addition if
1464  * NAPI is enabled it will delete any references to the NAPI struct prior
1465  * to freeing the q_vector.
1466  **/
1467 static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
1468 {
1469         int q_idx, num_q_vectors;
1470         int napi_vectors;
1471
1472         if (!adapter->q_vectors)
1473                 return;
1474
1475         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1476         napi_vectors = adapter->num_active_queues;
1477
1478         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1479                 struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
1480                 if (q_idx < napi_vectors)
1481                         netif_napi_del(&q_vector->napi);
1482         }
1483         kfree(adapter->q_vectors);
1484         adapter->q_vectors = NULL;
1485 }
1486
1487 /**
1488  * i40evf_reset_interrupt_capability - Reset MSIX setup
1489  * @adapter: board private structure
1490  *
1491  **/
1492 void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1493 {
1494         if (!adapter->msix_entries)
1495                 return;
1496
1497         pci_disable_msix(adapter->pdev);
1498         kfree(adapter->msix_entries);
1499         adapter->msix_entries = NULL;
1500 }
1501
1502 /**
1503  * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1504  * @adapter: board private structure to initialize
1505  *
1506  **/
1507 int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
1508 {
1509         int err;
1510
1511         rtnl_lock();
1512         err = i40evf_set_interrupt_capability(adapter);
1513         rtnl_unlock();
1514         if (err) {
1515                 dev_err(&adapter->pdev->dev,
1516                         "Unable to setup interrupt capabilities\n");
1517                 goto err_set_interrupt;
1518         }
1519
1520         err = i40evf_alloc_q_vectors(adapter);
1521         if (err) {
1522                 dev_err(&adapter->pdev->dev,
1523                         "Unable to allocate memory for queue vectors\n");
1524                 goto err_alloc_q_vectors;
1525         }
1526
1527         err = i40evf_alloc_queues(adapter);
1528         if (err) {
1529                 dev_err(&adapter->pdev->dev,
1530                         "Unable to allocate memory for queues\n");
1531                 goto err_alloc_queues;
1532         }
1533
1534         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1535                  (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1536                  adapter->num_active_queues);
1537
1538         return 0;
1539 err_alloc_queues:
1540         i40evf_free_q_vectors(adapter);
1541 err_alloc_q_vectors:
1542         i40evf_reset_interrupt_capability(adapter);
1543 err_set_interrupt:
1544         return err;
1545 }
1546
1547 /**
1548  * i40evf_free_rss - Free memory used by RSS structs
1549  * @adapter: board private structure
1550  **/
1551 static void i40evf_free_rss(struct i40evf_adapter *adapter)
1552 {
1553         kfree(adapter->rss_key);
1554         adapter->rss_key = NULL;
1555
1556         kfree(adapter->rss_lut);
1557         adapter->rss_lut = NULL;
1558 }
1559
/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @data: pointer to adapter disguised as unsigned long
 *
 * Runs in timer (softirq) context, so all real work is deferred to the
 * watchdog task via the work queue.
 **/
static void i40evf_watchdog_timer(unsigned long data)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}
1571
1572 /**
1573  * i40evf_watchdog_task - Periodic call-back task
1574  * @work: pointer to work_struct
1575  **/
1576 static void i40evf_watchdog_task(struct work_struct *work)
1577 {
1578         struct i40evf_adapter *adapter = container_of(work,
1579                                                       struct i40evf_adapter,
1580                                                       watchdog_task);
1581         struct i40e_hw *hw = &adapter->hw;
1582         u32 reg_val;
1583
1584         if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1585                 goto restart_watchdog;
1586
1587         if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1588                 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1589                           I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1590                 if ((reg_val == I40E_VFR_VFACTIVE) ||
1591                     (reg_val == I40E_VFR_COMPLETED)) {
1592                         /* A chance for redemption! */
1593                         dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1594                         adapter->state = __I40EVF_STARTUP;
1595                         adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1596                         schedule_delayed_work(&adapter->init_task, 10);
1597                         clear_bit(__I40EVF_IN_CRITICAL_TASK,
1598                                   &adapter->crit_section);
1599                         /* Don't reschedule the watchdog, since we've restarted
1600                          * the init task. When init_task contacts the PF and
1601                          * gets everything set up again, it'll restart the
1602                          * watchdog for us. Down, boy. Sit. Stay. Woof.
1603                          */
1604                         return;
1605                 }
1606                 adapter->aq_required = 0;
1607                 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1608                 goto watchdog_done;
1609         }
1610
1611         if ((adapter->state < __I40EVF_DOWN) ||
1612             (adapter->flags & I40EVF_FLAG_RESET_PENDING))
1613                 goto watchdog_done;
1614
1615         /* check for reset */
1616         reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
1617         if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
1618                 adapter->state = __I40EVF_RESETTING;
1619                 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1620                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1621                 schedule_work(&adapter->reset_task);
1622                 adapter->aq_required = 0;
1623                 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1624                 goto watchdog_done;
1625         }
1626
1627         /* Process admin queue tasks. After init, everything gets done
1628          * here so we don't race on the admin queue.
1629          */
1630         if (adapter->current_op) {
1631                 if (!i40evf_asq_done(hw)) {
1632                         dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
1633                         i40evf_send_api_ver(adapter);
1634                 }
1635                 goto watchdog_done;
1636         }
1637         if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
1638                 i40evf_send_vf_config_msg(adapter);
1639                 goto watchdog_done;
1640         }
1641
1642         if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
1643                 i40evf_disable_queues(adapter);
1644                 goto watchdog_done;
1645         }
1646
1647         if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
1648                 i40evf_map_queues(adapter);
1649                 goto watchdog_done;
1650         }
1651
1652         if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
1653                 i40evf_add_ether_addrs(adapter);
1654                 goto watchdog_done;
1655         }
1656
1657         if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
1658                 i40evf_add_vlans(adapter);
1659                 goto watchdog_done;
1660         }
1661
1662         if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
1663                 i40evf_del_ether_addrs(adapter);
1664                 goto watchdog_done;
1665         }
1666
1667         if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
1668                 i40evf_del_vlans(adapter);
1669                 goto watchdog_done;
1670         }
1671
1672         if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
1673                 i40evf_configure_queues(adapter);
1674                 goto watchdog_done;
1675         }
1676
1677         if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
1678                 i40evf_enable_queues(adapter);
1679                 goto watchdog_done;
1680         }
1681
1682         if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
1683                 /* This message goes straight to the firmware, not the
1684                  * PF, so we don't have to set current_op as we will
1685                  * not get a response through the ARQ.
1686                  */
1687                 i40evf_init_rss(adapter);
1688                 adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
1689                 goto watchdog_done;
1690         }
1691         if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
1692                 i40evf_get_hena(adapter);
1693                 goto watchdog_done;
1694         }
1695         if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
1696                 i40evf_set_hena(adapter);
1697                 goto watchdog_done;
1698         }
1699         if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
1700                 i40evf_set_rss_key(adapter);
1701                 goto watchdog_done;
1702         }
1703         if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
1704                 i40evf_set_rss_lut(adapter);
1705                 goto watchdog_done;
1706         }
1707
1708         if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
1709                 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
1710                                        I40E_FLAG_VF_MULTICAST_PROMISC);
1711                 goto watchdog_done;
1712         }
1713
1714         if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
1715                 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
1716                 goto watchdog_done;
1717         }
1718
1719         if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
1720             (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1721                 i40evf_set_promiscuous(adapter, 0);
1722                 goto watchdog_done;
1723         }
1724         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1725
1726         if (adapter->state == __I40EVF_RUNNING)
1727                 i40evf_request_stats(adapter);
1728 watchdog_done:
1729         if (adapter->state == __I40EVF_RUNNING) {
1730                 i40evf_irq_enable_queues(adapter, ~0);
1731                 i40evf_fire_sw_int(adapter, 0xFF);
1732         } else {
1733                 i40evf_fire_sw_int(adapter, 0x1);
1734         }
1735
1736         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1737 restart_watchdog:
1738         if (adapter->state == __I40EVF_REMOVE)
1739                 return;
1740         if (adapter->aq_required)
1741                 mod_timer(&adapter->watchdog_timer,
1742                           jiffies + msecs_to_jiffies(20));
1743         else
1744                 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1745         schedule_work(&adapter->adminq_task);
1746 }
1747
/**
 * i40evf_disable_vf - shut the VF down after a failed reset
 * @adapter: board private structure
 *
 * Called from the reset task when the PF never signalled reset
 * completion.  Quiesces the data path, releases all traffic resources
 * and filters, and leaves the adapter in __I40EVF_DOWN with PF
 * communications marked as failed so the watchdog stops talking to
 * the PF.
 **/
static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;

	/* From here on the watchdog path treats the PF as unreachable. */
	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	if (netif_running(adapter->netdev)) {
		/* Stop the stack and NAPI before releasing IRQs and rings. */
		set_bit(__I40E_DOWN, &adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	/* Delete all of the filters, both MAC and VLAN. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	/* Release the remaining interrupt/queue infrastructure and the
	 * admin queue; the VF is effectively dead after this.
	 */
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
1790
/* Polling interval and iteration bound used while waiting for the VF
 * reset to be signalled and then completed by the PF.
 */
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void i40evf_reset_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      reset_task);
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;

	/* Serialize against the client task: spin until we own the bit. */
	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		/* Cancel any queued client work and tell the client we are
		 * going down, before we touch the hardware.
		 */
		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		i40evf_notify_client_close(&adapter->vsi, true);
	}
	i40evf_misc_irq_disable(adapter);
	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		i40evf_shutdown_adminq(hw);
		i40evf_init_adminq(hw);
		i40evf_request_reset(adapter);
	}
	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen: the ARQ enable bit
	 * going to zero is our indication that the device was reset
	 */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(I40EVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == I40E_VFR_VFACTIVE)
			break;
	}

	/* NOTE(review): presumably the PF reset can clear our bus-master
	 * enable, hence the unconditional restore here — confirm against
	 * the i40e PF driver.
	 */
	pci_set_master(adapter->pdev);

	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		i40evf_disable_vf(adapter);
		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	if (netif_running(adapter->netdev)) {
		/* Stop traffic and NAPI while we rebuild the rings. */
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
	}
	i40evf_irq_disable(adapter);

	adapter->state = __I40EVF_RESETTING;
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_all_tx_resources(adapter);

	/* kill and reinit the admin queue */
	i40evf_shutdown_adminq(hw);
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
	err = i40evf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);

	/* Queue requests to re-fetch our config and vector mapping; the
	 * watchdog will issue them once we restart it below.
	 */
	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	/* Release both critical-section bits before re-enabling the misc
	 * interrupt so the other tasks can run again.
	 */
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	i40evf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	if (netif_running(adapter->netdev)) {
		/* allocate transmit descriptors */
		err = i40evf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = i40evf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		i40evf_configure(adapter);

		i40evf_up_complete(adapter);

		i40evf_irq_enable(adapter, true);
	} else {
		adapter->state = __I40EVF_DOWN;
	}

	return;
reset_err:
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(adapter->netdev);
}
1940
1941 /**
1942  * i40evf_adminq_task - worker thread to clean the admin queue
1943  * @work: pointer to work_struct containing our data
1944  **/
1945 static void i40evf_adminq_task(struct work_struct *work)
1946 {
1947         struct i40evf_adapter *adapter =
1948                 container_of(work, struct i40evf_adapter, adminq_task);
1949         struct i40e_hw *hw = &adapter->hw;
1950         struct i40e_arq_event_info event;
1951         struct i40e_virtchnl_msg *v_msg;
1952         i40e_status ret;
1953         u32 val, oldval;
1954         u16 pending;
1955
1956         if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1957                 goto out;
1958
1959         event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1960         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1961         if (!event.msg_buf)
1962                 goto out;
1963
1964         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1965         do {
1966                 ret = i40evf_clean_arq_element(hw, &event, &pending);
1967                 if (ret || !v_msg->v_opcode)
1968                         break; /* No event to process or error cleaning ARQ */
1969
1970                 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
1971                                            v_msg->v_retval, event.msg_buf,
1972                                            event.msg_len);
1973                 if (pending != 0)
1974                         memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
1975         } while (pending);
1976
1977         if ((adapter->flags &
1978              (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
1979             adapter->state == __I40EVF_RESETTING)
1980                 goto freedom;
1981
1982         /* check for error indications */
1983         val = rd32(hw, hw->aq.arq.len);
1984         if (val == 0xdeadbeef) /* indicates device in reset */
1985                 goto freedom;
1986         oldval = val;
1987         if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1988                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
1989                 val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1990         }
1991         if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1992                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
1993                 val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1994         }
1995         if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1996                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
1997                 val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
1998         }
1999         if (oldval != val)
2000                 wr32(hw, hw->aq.arq.len, val);
2001
2002         val = rd32(hw, hw->aq.asq.len);
2003         oldval = val;
2004         if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2005                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2006                 val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2007         }
2008         if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2009                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2010                 val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2011         }
2012         if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2013                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2014                 val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2015         }
2016         if (oldval != val)
2017                 wr32(hw, hw->aq.asq.len, val);
2018
2019 freedom:
2020         kfree(event.msg_buf);
2021 out:
2022         /* re-enable Admin queue interrupt cause */
2023         i40evf_misc_irq_enable(adapter);
2024 }
2025
2026 /**
2027  * i40evf_client_task - worker thread to perform client work
2028  * @work: pointer to work_struct containing our data
2029  *
2030  * This task handles client interactions. Because client calls can be
2031  * reentrant, we can't handle them in the watchdog.
2032  **/
2033 static void i40evf_client_task(struct work_struct *work)
2034 {
2035         struct i40evf_adapter *adapter =
2036                 container_of(work, struct i40evf_adapter, client_task.work);
2037
2038         /* If we can't get the client bit, just give up. We'll be rescheduled
2039          * later.
2040          */
2041
2042         if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2043                 return;
2044
2045         if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2046                 i40evf_client_subtask(adapter);
2047                 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2048                 goto out;
2049         }
2050         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2051                 i40evf_notify_client_close(&adapter->vsi, false);
2052                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2053                 goto out;
2054         }
2055         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2056                 i40evf_notify_client_open(&adapter->vsi);
2057                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2058                 goto out;
2059         }
2060         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2061                 i40evf_notify_client_l2_params(&adapter->vsi);
2062                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2063         }
2064 out:
2065         clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2066 }
2067
2068 /**
2069  * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2070  * @adapter: board private structure
2071  *
2072  * Free all transmit software resources
2073  **/
2074 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2075 {
2076         int i;
2077
2078         if (!adapter->tx_rings)
2079                 return;
2080
2081         for (i = 0; i < adapter->num_active_queues; i++)
2082                 if (adapter->tx_rings[i].desc)
2083                         i40evf_free_tx_resources(&adapter->tx_rings[i]);
2084 }
2085
2086 /**
2087  * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2088  * @adapter: board private structure
2089  *
2090  * If this function returns with an error, then it's possible one or
2091  * more of the rings is populated (while the rest are not).  It is the
2092  * callers duty to clean those orphaned rings.
2093  *
2094  * Return 0 on success, negative on failure
2095  **/
2096 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2097 {
2098         int i, err = 0;
2099
2100         for (i = 0; i < adapter->num_active_queues; i++) {
2101                 adapter->tx_rings[i].count = adapter->tx_desc_count;
2102                 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2103                 if (!err)
2104                         continue;
2105                 dev_err(&adapter->pdev->dev,
2106                         "Allocation for Tx Queue %u failed\n", i);
2107                 break;
2108         }
2109
2110         return err;
2111 }
2112
2113 /**
2114  * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2115  * @adapter: board private structure
2116  *
2117  * If this function returns with an error, then it's possible one or
2118  * more of the rings is populated (while the rest are not).  It is the
2119  * callers duty to clean those orphaned rings.
2120  *
2121  * Return 0 on success, negative on failure
2122  **/
2123 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2124 {
2125         int i, err = 0;
2126
2127         for (i = 0; i < adapter->num_active_queues; i++) {
2128                 adapter->rx_rings[i].count = adapter->rx_desc_count;
2129                 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2130                 if (!err)
2131                         continue;
2132                 dev_err(&adapter->pdev->dev,
2133                         "Allocation for Rx Queue %u failed\n", i);
2134                 break;
2135         }
2136         return err;
2137 }
2138
2139 /**
2140  * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2141  * @adapter: board private structure
2142  *
2143  * Free all receive software resources
2144  **/
2145 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2146 {
2147         int i;
2148
2149         if (!adapter->rx_rings)
2150                 return;
2151
2152         for (i = 0; i < adapter->num_active_queues; i++)
2153                 if (adapter->rx_rings[i].desc)
2154                         i40evf_free_rx_resources(&adapter->rx_rings[i]);
2155 }
2156
2157 /**
2158  * i40evf_open - Called when a network interface is made active
2159  * @netdev: network interface device structure
2160  *
2161  * Returns 0 on success, negative value on failure
2162  *
2163  * The open entry point is called when a network interface is made
2164  * active by the system (IFF_UP).  At this point all resources needed
2165  * for transmit and receive operations are allocated, the interrupt
2166  * handler is registered with the OS, the watchdog timer is started,
2167  * and the stack is notified that the interface is ready.
2168  **/
2169 static int i40evf_open(struct net_device *netdev)
2170 {
2171         struct i40evf_adapter *adapter = netdev_priv(netdev);
2172         int err;
2173
2174         if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2175                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2176                 return -EIO;
2177         }
2178
2179         if (adapter->state != __I40EVF_DOWN)
2180                 return -EBUSY;
2181
2182         /* allocate transmit descriptors */
2183         err = i40evf_setup_all_tx_resources(adapter);
2184         if (err)
2185                 goto err_setup_tx;
2186
2187         /* allocate receive descriptors */
2188         err = i40evf_setup_all_rx_resources(adapter);
2189         if (err)
2190                 goto err_setup_rx;
2191
2192         /* clear any pending interrupts, may auto mask */
2193         err = i40evf_request_traffic_irqs(adapter, netdev->name);
2194         if (err)
2195                 goto err_req_irq;
2196
2197         i40evf_add_filter(adapter, adapter->hw.mac.addr);
2198         i40evf_configure(adapter);
2199
2200         i40evf_up_complete(adapter);
2201
2202         i40evf_irq_enable(adapter, true);
2203
2204         return 0;
2205
2206 err_req_irq:
2207         i40evf_down(adapter);
2208         i40evf_free_traffic_irqs(adapter);
2209 err_setup_rx:
2210         i40evf_free_all_rx_resources(adapter);
2211 err_setup_tx:
2212         i40evf_free_all_tx_resources(adapter);
2213
2214         return err;
2215 }
2216
2217 /**
2218  * i40evf_close - Disables a network interface
2219  * @netdev: network interface device structure
2220  *
2221  * Returns 0, this is not allowed to fail
2222  *
2223  * The close entry point is called when an interface is de-activated
2224  * by the OS.  The hardware is still under the drivers control, but
2225  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2226  * are freed, along with all transmit and receive resources.
2227  **/
2228 static int i40evf_close(struct net_device *netdev)
2229 {
2230         struct i40evf_adapter *adapter = netdev_priv(netdev);
2231
2232         if (adapter->state <= __I40EVF_DOWN_PENDING)
2233                 return 0;
2234
2235
2236         set_bit(__I40E_DOWN, &adapter->vsi.state);
2237         if (CLIENT_ENABLED(adapter))
2238                 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2239
2240         i40evf_down(adapter);
2241         adapter->state = __I40EVF_DOWN_PENDING;
2242         i40evf_free_traffic_irqs(adapter);
2243
2244         /* We explicitly don't free resources here because the hardware is
2245          * still active and can DMA into memory. Resources are cleared in
2246          * i40evf_virtchnl_completion() after we get confirmation from the PF
2247          * driver that the rings have been stopped.
2248          */
2249         return 0;
2250 }
2251
2252 /**
2253  * i40evf_change_mtu - Change the Maximum Transfer Unit
2254  * @netdev: network interface device structure
2255  * @new_mtu: new value for maximum frame size
2256  *
2257  * Returns 0 on success, negative on failure
2258  **/
2259 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2260 {
2261         struct i40evf_adapter *adapter = netdev_priv(netdev);
2262
2263         netdev->mtu = new_mtu;
2264         if (CLIENT_ENABLED(adapter)) {
2265                 i40evf_notify_client_l2_params(&adapter->vsi);
2266                 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2267         }
2268         adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2269         schedule_work(&adapter->reset_task);
2270
2271         return 0;
2272 }
2273
2274 /**
2275  * i40evf_features_check - Validate encapsulated packet conforms to limits
2276  * @skb: skb buff
2277  * @netdev: This physical port's netdev
2278  * @features: Offload features that the stack believes apply
2279  **/
2280 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
2281                                                struct net_device *dev,
2282                                                netdev_features_t features)
2283 {
2284         size_t len;
2285
2286         /* No point in doing any of this if neither checksum nor GSO are
2287          * being requested for this frame.  We can rule out both by just
2288          * checking for CHECKSUM_PARTIAL
2289          */
2290         if (skb->ip_summed != CHECKSUM_PARTIAL)
2291                 return features;
2292
2293         /* We cannot support GSO if the MSS is going to be less than
2294          * 64 bytes.  If it is then we need to drop support for GSO.
2295          */
2296         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
2297                 features &= ~NETIF_F_GSO_MASK;
2298
2299         /* MACLEN can support at most 63 words */
2300         len = skb_network_header(skb) - skb->data;
2301         if (len & ~(63 * 2))
2302                 goto out_err;
2303
2304         /* IPLEN and EIPLEN can support at most 127 dwords */
2305         len = skb_transport_header(skb) - skb_network_header(skb);
2306         if (len & ~(127 * 4))
2307                 goto out_err;
2308
2309         if (skb->encapsulation) {
2310                 /* L4TUNLEN can support 127 words */
2311                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
2312                 if (len & ~(127 * 2))
2313                         goto out_err;
2314
2315                 /* IPLEN can support at most 127 dwords */
2316                 len = skb_inner_transport_header(skb) -
2317                       skb_inner_network_header(skb);
2318                 if (len & ~(127 * 4))
2319                         goto out_err;
2320         }
2321
2322         /* No need to validate L4LEN as TCP is the only protocol with a
2323          * a flexible value and we support all possible values supported
2324          * by TCP, which is at most 15 dwords
2325          */
2326
2327         return features;
2328 out_err:
2329         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2330 }
2331
/* VLAN offload feature bits that are only advertised when the PF grants
 * the VLAN offload capability.
 */
#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
			      NETIF_F_HW_VLAN_CTAG_RX |\
			      NETIF_F_HW_VLAN_CTAG_FILTER)

/**
 * i40evf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits: VLAN offloads are stripped unless the
 * PF has granted us the I40E_VIRTCHNL_VF_OFFLOAD_VLAN capability.
 **/
static netdev_features_t i40evf_fix_features(struct net_device *netdev,
					     netdev_features_t features)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	features &= ~I40EVF_VLAN_FEATURES;
	if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
		features |= I40EVF_VLAN_FEATURES;
	return features;
}
2353
/* net_device callbacks registered for the VF netdev. */
static const struct net_device_ops i40evf_netdev_ops = {
	.ndo_open               = i40evf_open,
	.ndo_stop               = i40evf_close,
	.ndo_start_xmit         = i40evf_xmit_frame,
	.ndo_set_rx_mode        = i40evf_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = i40evf_set_mac,
	.ndo_change_mtu         = i40evf_change_mtu,
	.ndo_tx_timeout         = i40evf_tx_timeout,
	.ndo_vlan_rx_add_vid    = i40evf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = i40evf_vlan_rx_kill_vid,
	.ndo_features_check     = i40evf_features_check,
	.ndo_fix_features       = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = i40evf_netpoll,
#endif
};
2371
2372 /**
2373  * i40evf_check_reset_complete - check that VF reset is complete
2374  * @hw: pointer to hw struct
2375  *
2376  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2377  **/
2378 static int i40evf_check_reset_complete(struct i40e_hw *hw)
2379 {
2380         u32 rstat;
2381         int i;
2382
2383         for (i = 0; i < 100; i++) {
2384                 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
2385                             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2386                 if ((rstat == I40E_VFR_VFACTIVE) ||
2387                     (rstat == I40E_VFR_COMPLETED))
2388                         return 0;
2389                 usleep_range(10, 20);
2390         }
2391         return -EBUSY;
2392 }
2393
2394 /**
2395  * i40evf_process_config - Process the config information we got from the PF
2396  * @adapter: board private structure
2397  *
2398  * Verify that we have a valid config struct, and set up our netdev features
2399  * and our VSI struct.
2400  **/
2401 int i40evf_process_config(struct i40evf_adapter *adapter)
2402 {
2403         struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
2404         struct net_device *netdev = adapter->netdev;
2405         struct i40e_vsi *vsi = &adapter->vsi;
2406         int i;
2407         netdev_features_t hw_enc_features;
2408         netdev_features_t hw_features;
2409
2410         /* got VF config message back from PF, now we can parse it */
2411         for (i = 0; i < vfres->num_vsis; i++) {
2412                 if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
2413                         adapter->vsi_res = &vfres->vsi_res[i];
2414         }
2415         if (!adapter->vsi_res) {
2416                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2417                 return -ENODEV;
2418         }
2419
2420         hw_enc_features = NETIF_F_SG                    |
2421                           NETIF_F_IP_CSUM               |
2422                           NETIF_F_IPV6_CSUM             |
2423                           NETIF_F_HIGHDMA               |
2424                           NETIF_F_SOFT_FEATURES |
2425                           NETIF_F_TSO                   |
2426                           NETIF_F_TSO_ECN               |
2427                           NETIF_F_TSO6                  |
2428                           NETIF_F_SCTP_CRC              |
2429                           NETIF_F_RXHASH                |
2430                           NETIF_F_RXCSUM                |
2431                           0;
2432
2433         /* advertise to stack only if offloads for encapsulated packets is
2434          * supported
2435          */
2436         if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
2437                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
2438                                    NETIF_F_GSO_GRE              |
2439                                    NETIF_F_GSO_GRE_CSUM         |
2440                                    NETIF_F_GSO_IPXIP4           |
2441                                    NETIF_F_GSO_IPXIP6           |
2442                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
2443                                    NETIF_F_GSO_PARTIAL          |
2444                                    0;
2445
2446                 if (!(vfres->vf_offload_flags &
2447                       I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2448                         netdev->gso_partial_features |=
2449                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
2450
2451                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2452                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2453                 netdev->hw_enc_features |= hw_enc_features;
2454         }
2455         /* record features VLANs can make use of */
2456         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
2457
2458         /* Write features and hw_features separately to avoid polluting
2459          * with, or dropping, features that are set when we registered.
2460          */
2461         hw_features = hw_enc_features;
2462
2463         netdev->hw_features |= hw_features;
2464
2465         netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
2466
2467         adapter->vsi.id = adapter->vsi_res->vsi_id;
2468
2469         adapter->vsi.back = adapter;
2470         adapter->vsi.base_vector = 1;
2471         adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2472         vsi->netdev = adapter->netdev;
2473         vsi->qs_handle = adapter->vsi_res->qset_handle;
2474         if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2475                 adapter->rss_key_size = vfres->rss_key_size;
2476                 adapter->rss_lut_size = vfres->rss_lut_size;
2477         } else {
2478                 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
2479                 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
2480         }
2481
2482         return 0;
2483 }
2484
2485 /**
2486  * i40evf_init_task - worker thread to perform delayed initialization
2487  * @work: pointer to work_struct containing our data
2488  *
2489  * This task completes the work that was begun in probe. Due to the nature
2490  * of VF-PF communications, we may need to wait tens of milliseconds to get
2491  * responses back from the PF. Rather than busy-wait in probe and bog down the
2492  * whole system, we'll do it in a task so we can sleep.
2493  * This task only runs during driver init. Once we've established
2494  * communications with the PF driver and set up our netdev, the watchdog
2495  * takes over.
2496  **/
static void i40evf_init_task(struct work_struct *work)
{
        struct i40evf_adapter *adapter = container_of(work,
                                                      struct i40evf_adapter,
                                                      init_task.work);
        struct net_device *netdev = adapter->netdev;
        struct i40e_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        int err, bufsz;

        /* Three-step init state machine. Each AQ exchange with the PF is
         * asynchronous, so each state sends a request, reschedules itself
         * (via "restart"), and checks the reply on the next invocation.
         */
        switch (adapter->state) {
        case __I40EVF_STARTUP:
                /* driver loaded, probe complete */
                adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
                adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
                err = i40e_set_mac_type(hw);
                if (err) {
                        dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
                                err);
                        goto err;
                }
                err = i40evf_check_reset_complete(hw);
                if (err) {
                        dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
                                 err);
                        goto err;
                }
                /* Size the admin queues before bringing them up. */
                hw->aq.num_arq_entries = I40EVF_AQ_LEN;
                hw->aq.num_asq_entries = I40EVF_AQ_LEN;
                hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
                hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;

                err = i40evf_init_adminq(hw);
                if (err) {
                        dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
                                err);
                        goto err;
                }
                /* Kick off the version handshake; reply is checked in the
                 * next state.
                 */
                err = i40evf_send_api_ver(adapter);
                if (err) {
                        dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
                        i40evf_shutdown_adminq(hw);
                        goto err;
                }
                adapter->state = __I40EVF_INIT_VERSION_CHECK;
                goto restart;
        case __I40EVF_INIT_VERSION_CHECK:
                if (!i40evf_asq_done(hw)) {
                        dev_err(&pdev->dev, "Admin queue command never completed\n");
                        i40evf_shutdown_adminq(hw);
                        /* Start over from scratch on the next attempt. */
                        adapter->state = __I40EVF_STARTUP;
                        goto err;
                }

                /* aq msg sent, awaiting reply */
                err = i40evf_verify_api_ver(adapter);
                if (err) {
                        /* NO_WORK means the PF hasn't answered yet; resend
                         * the request and try again on the next pass.
                         */
                        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                                err = i40evf_send_api_ver(adapter);
                        else
                                dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
                                        adapter->pf_version.major,
                                        adapter->pf_version.minor,
                                        I40E_VIRTCHNL_VERSION_MAJOR,
                                        I40E_VIRTCHNL_VERSION_MINOR);
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
                if (err) {
                        dev_err(&pdev->dev, "Unable to send config request (%d)\n",
                                err);
                        goto err;
                }
                adapter->state = __I40EVF_INIT_GET_RESOURCES;
                goto restart;
        case __I40EVF_INIT_GET_RESOURCES:
                /* aq msg sent, awaiting reply */
                if (!adapter->vf_res) {
                        /* Buffer must hold the resource struct plus one VSI
                         * resource entry per possible VSI.
                         */
                        bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                                (I40E_MAX_VF_VSI *
                                 sizeof(struct i40e_virtchnl_vsi_resource));
                        adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
                        if (!adapter->vf_res)
                                goto err;
                }
                err = i40evf_get_vf_config(adapter);
                if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
                        /* No reply yet; resend and retry via the err path. */
                        err = i40evf_send_vf_config_msg(adapter);
                        goto err;
                } else if (err == I40E_ERR_PARAM) {
                        /* We only get ERR_PARAM if the device is in a very bad
                         * state or if we've been disabled for previous bad
                         * behavior. Either way, we're done now.
                         */
                        i40evf_shutdown_adminq(hw);
                        dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
                        return;
                }
                if (err) {
                        dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
                                err);
                        goto err_alloc;
                }
                adapter->state = __I40EVF_INIT_SW;
                break;
        default:
                goto err_alloc;
        }

        /* All PF communication done; from here on we do pure SW setup. */
        if (i40evf_process_config(adapter))
                goto err_alloc;
        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;

        adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;

        netdev->netdev_ops = &i40evf_netdev_ops;
        i40evf_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;

        /* MTU range: 68 - 9710 */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);

        /* The PF may hand us a zero/multicast MAC; fall back to a random
         * locally administered address in that case.
         */
        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
                         adapter->hw.mac.addr);
                eth_hw_addr_random(netdev);
                ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
        } else {
                adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
                ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
                ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
        }

        /* Hand ongoing supervision over to the watchdog timer. */
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &i40evf_watchdog_timer;
        adapter->watchdog_timer.data = (unsigned long)adapter;
        mod_timer(&adapter->watchdog_timer, jiffies + 1);

        /* No point in having more queue pairs than online CPUs. */
        adapter->num_active_queues = min_t(int,
                                           adapter->vsi_res->num_queue_pairs,
                                           (int)(num_online_cpus()));
        adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
        adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
        err = i40evf_init_interrupt_scheme(adapter);
        if (err)
                goto err_sw_init;
        i40evf_map_rings_to_vectors(adapter);
        if (adapter->vf_res->vf_offload_flags &
            I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
                adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;

        err = i40evf_request_misc_irq(adapter);
        if (err)
                goto err_sw_init;

        netif_carrier_off(netdev);
        adapter->link_up = false;

        if (!adapter->netdev_registered) {
                err = register_netdev(netdev);
                if (err)
                        goto err_register;
        }

        adapter->netdev_registered = true;

        netif_tx_stop_all_queues(netdev);
        if (CLIENT_ALLOWED(adapter)) {
                err = i40evf_lan_add_device(adapter);
                if (err)
                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
                                 err);
        }

        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
        if (netdev->features & NETIF_F_GRO)
                dev_info(&pdev->dev, "GRO is enabled\n");

        adapter->state = __I40EVF_DOWN;
        set_bit(__I40E_DOWN, &adapter->vsi.state);
        i40evf_misc_irq_enable(adapter);

        adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
        adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
        if (!adapter->rss_key || !adapter->rss_lut)
                goto err_mem;

        /* If the PF configures RSS via AQ, defer it to the watchdog;
         * otherwise program it directly now.
         */
        if (RSS_AQ(adapter)) {
                adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
                mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
        } else {
                i40evf_init_rss(adapter);
        }
        return;
restart:
        /* Reschedule ourselves to poll for the PF's reply. */
        schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
        return;
        /* Error labels unwind in reverse order of acquisition and fall
         * through to the retry logic at "err".
         */
err_mem:
        i40evf_free_rss(adapter);
err_register:
        i40evf_free_misc_irq(adapter);
err_sw_init:
        i40evf_reset_interrupt_capability(adapter);
err_alloc:
        kfree(adapter->vf_res);
        adapter->vf_res = NULL;
err:
        /* Things went into the weeds, so try again later */
        if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
                dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
                i40evf_shutdown_adminq(hw);
                adapter->state = __I40EVF_STARTUP;
                schedule_delayed_work(&adapter->init_task, HZ * 5);
                return;
        }
        schedule_delayed_work(&adapter->init_task, HZ);
}
2716
2717 /**
2718  * i40evf_shutdown - Shutdown the device in preparation for a reboot
2719  * @pdev: pci device structure
2720  **/
static void i40evf_shutdown(struct pci_dev *pdev)
{
        /* drvdata holds the netdev (set in i40evf_probe) */
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct i40evf_adapter *adapter = netdev_priv(netdev);

        /* Keep the stack from touching the device while we quiesce it. */
        netif_device_detach(netdev);

        if (netif_running(netdev))
                i40evf_close(netdev);

        /* Prevent the watchdog from running. */
        adapter->state = __I40EVF_REMOVE;
        adapter->aq_required = 0;

#ifdef CONFIG_PM
        /* Preserve config space so a PM resume can restore it. */
        pci_save_state(pdev);

#endif
        pci_disable_device(pdev);
}
2741
2742 /**
2743  * i40evf_probe - Device Initialization Routine
2744  * @pdev: PCI device information struct
2745  * @ent: entry in i40evf_pci_tbl
2746  *
2747  * Returns 0 on success, negative on failure
2748  *
2749  * i40evf_probe initializes an adapter identified by a pci_dev structure.
2750  * The OS initialization, configuring of the adapter private structure,
2751  * and a hardware reset occur.
2752  **/
2753 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2754 {
2755         struct net_device *netdev;
2756         struct i40evf_adapter *adapter = NULL;
2757         struct i40e_hw *hw = NULL;
2758         int err;
2759
2760         err = pci_enable_device(pdev);
2761         if (err)
2762                 return err;
2763
2764         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2765         if (err) {
2766                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2767                 if (err) {
2768                         dev_err(&pdev->dev,
2769                                 "DMA configuration failed: 0x%x\n", err);
2770                         goto err_dma;
2771                 }
2772         }
2773
2774         err = pci_request_regions(pdev, i40evf_driver_name);
2775         if (err) {
2776                 dev_err(&pdev->dev,
2777                         "pci_request_regions failed 0x%x\n", err);
2778                 goto err_pci_reg;
2779         }
2780
2781         pci_enable_pcie_error_reporting(pdev);
2782
2783         pci_set_master(pdev);
2784
2785         netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
2786         if (!netdev) {
2787                 err = -ENOMEM;
2788                 goto err_alloc_etherdev;
2789         }
2790
2791         SET_NETDEV_DEV(netdev, &pdev->dev);
2792
2793         pci_set_drvdata(pdev, netdev);
2794         adapter = netdev_priv(netdev);
2795
2796         adapter->netdev = netdev;
2797         adapter->pdev = pdev;
2798
2799         hw = &adapter->hw;
2800         hw->back = adapter;
2801
2802         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
2803         adapter->state = __I40EVF_STARTUP;
2804
2805         /* Call save state here because it relies on the adapter struct. */
2806         pci_save_state(pdev);
2807
2808         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
2809                               pci_resource_len(pdev, 0));
2810         if (!hw->hw_addr) {
2811                 err = -EIO;
2812                 goto err_ioremap;
2813         }
2814         hw->vendor_id = pdev->vendor;
2815         hw->device_id = pdev->device;
2816         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2817         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2818         hw->subsystem_device_id = pdev->subsystem_device;
2819         hw->bus.device = PCI_SLOT(pdev->devfn);
2820         hw->bus.func = PCI_FUNC(pdev->devfn);
2821         hw->bus.bus_id = pdev->bus->number;
2822
2823         /* set up the locks for the AQ, do this only once in probe
2824          * and destroy them only once in remove
2825          */
2826         mutex_init(&hw->aq.asq_mutex);
2827         mutex_init(&hw->aq.arq_mutex);
2828
2829         INIT_LIST_HEAD(&adapter->mac_filter_list);
2830         INIT_LIST_HEAD(&adapter->vlan_filter_list);
2831
2832         INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2833         INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2834         INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
2835         INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
2836         INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
2837         schedule_delayed_work(&adapter->init_task,
2838                               msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
2839
2840         return 0;
2841
2842 err_ioremap:
2843         free_netdev(netdev);
2844 err_alloc_etherdev:
2845         pci_release_regions(pdev);
2846 err_pci_reg:
2847 err_dma:
2848         pci_disable_device(pdev);
2849         return err;
2850 }
2851
2852 #ifdef CONFIG_PM
2853 /**
2854  * i40evf_suspend - Power management suspend routine
2855  * @pdev: PCI device information struct
2856  * @state: unused
2857  *
2858  * Called when the system (VM) is entering sleep/suspend.
2859  **/
static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* drvdata holds the netdev (set in i40evf_probe) */
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        int retval = 0;

        /* Stop the stack from touching the device while we tear it down. */
        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                /* i40evf_down() must be called under RTNL */
                rtnl_lock();
                i40evf_down(adapter);
                rtnl_unlock();
        }
        /* Release the misc vector and interrupt scheme; both are
         * reacquired in i40evf_resume().
         */
        i40evf_free_misc_irq(adapter);
        i40evf_reset_interrupt_capability(adapter);

        retval = pci_save_state(pdev);
        if (retval)
                return retval;

        pci_disable_device(pdev);

        return 0;
}
2884
2885 /**
2886  * i40evf_resume - Power management resume routine
2887  * @pdev: PCI device information struct
2888  *
2889  * Called when the system (VM) is resumed from sleep/suspend.
2890  **/
2891 static int i40evf_resume(struct pci_dev *pdev)
2892 {
2893         struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
2894         struct net_device *netdev = adapter->netdev;
2895         u32 err;
2896
2897         pci_set_power_state(pdev, PCI_D0);
2898         pci_restore_state(pdev);
2899         /* pci_restore_state clears dev->state_saved so call
2900          * pci_save_state to restore it.
2901          */
2902         pci_save_state(pdev);
2903
2904         err = pci_enable_device_mem(pdev);
2905         if (err) {
2906                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
2907                 return err;
2908         }
2909         pci_set_master(pdev);
2910
2911         rtnl_lock();
2912         err = i40evf_set_interrupt_capability(adapter);
2913         if (err) {
2914                 rtnl_unlock();
2915                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
2916                 return err;
2917         }
2918         err = i40evf_request_misc_irq(adapter);
2919         rtnl_unlock();
2920         if (err) {
2921                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
2922                 return err;
2923         }
2924
2925         schedule_work(&adapter->reset_task);
2926
2927         netif_device_attach(netdev);
2928
2929         return err;
2930 }
2931
2932 #endif /* CONFIG_PM */
2933 /**
2934  * i40evf_remove - Device Removal Routine
2935  * @pdev: PCI device information struct
2936  *
2937  * i40evf_remove is called by the PCI subsystem to alert the driver
2938  * that it should release a PCI device.  The could be caused by a
2939  * Hot-Plug event, or because the driver is going to be removed from
2940  * memory.
2941  **/
2942 static void i40evf_remove(struct pci_dev *pdev)
2943 {
2944         struct net_device *netdev = pci_get_drvdata(pdev);
2945         struct i40evf_adapter *adapter = netdev_priv(netdev);
2946         struct i40evf_mac_filter *f, *ftmp;
2947         struct i40e_hw *hw = &adapter->hw;
2948         int err;
2949
2950         cancel_delayed_work_sync(&adapter->init_task);
2951         cancel_work_sync(&adapter->reset_task);
2952         cancel_delayed_work_sync(&adapter->client_task);
2953         if (adapter->netdev_registered) {
2954                 unregister_netdev(netdev);
2955                 adapter->netdev_registered = false;
2956         }
2957         if (CLIENT_ALLOWED(adapter)) {
2958                 err = i40evf_lan_del_device(adapter);
2959                 if (err)
2960                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
2961                                  err);
2962         }
2963
2964         /* Shut down all the garbage mashers on the detention level */
2965         adapter->state = __I40EVF_REMOVE;
2966         adapter->aq_required = 0;
2967         i40evf_request_reset(adapter);
2968         msleep(50);
2969         /* If the FW isn't responding, kick it once, but only once. */
2970         if (!i40evf_asq_done(hw)) {
2971                 i40evf_request_reset(adapter);
2972                 msleep(50);
2973         }
2974         i40evf_free_all_tx_resources(adapter);
2975         i40evf_free_all_rx_resources(adapter);
2976         i40evf_misc_irq_disable(adapter);
2977         i40evf_free_misc_irq(adapter);
2978         i40evf_reset_interrupt_capability(adapter);
2979         i40evf_free_q_vectors(adapter);
2980
2981         if (adapter->watchdog_timer.function)
2982                 del_timer_sync(&adapter->watchdog_timer);
2983
2984         flush_scheduled_work();
2985
2986         i40evf_free_rss(adapter);
2987
2988         if (hw->aq.asq.count)
2989                 i40evf_shutdown_adminq(hw);
2990
2991         /* destroy the locks only once, here */
2992         mutex_destroy(&hw->aq.arq_mutex);
2993         mutex_destroy(&hw->aq.asq_mutex);
2994
2995         iounmap(hw->hw_addr);
2996         pci_release_regions(pdev);
2997         i40evf_free_all_tx_resources(adapter);
2998         i40evf_free_all_rx_resources(adapter);
2999         i40evf_free_queues(adapter);
3000         kfree(adapter->vf_res);
3001         /* If we got removed before an up/down sequence, we've got a filter
3002          * hanging out there that we need to get rid of.
3003          */
3004         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3005                 list_del(&f->list);
3006                 kfree(f);
3007         }
3008         list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
3009                 list_del(&f->list);
3010                 kfree(f);
3011         }
3012
3013         free_netdev(netdev);
3014
3015         pci_disable_pcie_error_reporting(pdev);
3016
3017         pci_disable_device(pdev);
3018 }
3019
/* PCI driver hooks; legacy PM callbacks are used (suspend/resume),
 * compiled in only when CONFIG_PM is set.
 */
static struct pci_driver i40evf_driver = {
        .name     = i40evf_driver_name,
        .id_table = i40evf_pci_tbl,
        .probe    = i40evf_probe,
        .remove   = i40evf_remove,
#ifdef CONFIG_PM
        .suspend  = i40evf_suspend,
        .resume   = i40evf_resume,
#endif
        .shutdown = i40evf_shutdown,
};
3031
3032 /**
3033  * i40e_init_module - Driver Registration Routine
3034  *
3035  * i40e_init_module is the first routine called when the driver is
3036  * loaded. All it does is register with the PCI subsystem.
3037  **/
3038 static int __init i40evf_init_module(void)
3039 {
3040         int ret;
3041
3042         pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3043                 i40evf_driver_version);
3044
3045         pr_info("%s\n", i40evf_copyright);
3046
3047         i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3048                                     i40evf_driver_name);
3049         if (!i40evf_wq) {
3050                 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3051                 return -ENOMEM;
3052         }
3053         ret = pci_register_driver(&i40evf_driver);
3054         return ret;
3055 }
3056
3057 module_init(i40evf_init_module);
3058
3059 /**
3060  * i40e_exit_module - Driver Exit Cleanup Routine
3061  *
3062  * i40e_exit_module is called just before the driver is removed
3063  * from memory.
3064  **/
static void __exit i40evf_exit_module(void)
{
        /* Unregister first so no new work can be queued, then destroy
         * the shared workqueue.
         */
        pci_unregister_driver(&i40evf_driver);
        destroy_workqueue(i40evf_wq);
}
3070
3071 module_exit(i40evf_exit_module);
3072
3073 /* i40evf_main.c */