/* Source: drivers/net/ethernet/intel/i40e/i40e_main.c
 * (snapshot taken at commit "i40e: Remove unnecessary pf members")
 */
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36                         "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 3
42 #define DRV_VERSION_BUILD 2
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44              __stringify(DRV_VERSION_MINOR) "." \
45              __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49 /* a bit of forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79         /* required last entry */
80         {0, }
81 };
82 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
83
84 #define I40E_MAX_VF_COUNT 128
85 static int debug = -1;
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
88
89 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
90 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_VERSION);
93
94 /**
95  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
96  * @hw:   pointer to the HW structure
97  * @mem:  ptr to mem struct to fill out
98  * @size: size of memory requested
99  * @alignment: what to align the allocation to
100  **/
101 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
102                             u64 size, u32 alignment)
103 {
104         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
105
106         mem->size = ALIGN(size, alignment);
107         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
108                                       &mem->pa, GFP_KERNEL);
109         if (!mem->va)
110                 return -ENOMEM;
111
112         return 0;
113 }
114
115 /**
116  * i40e_free_dma_mem_d - OS specific memory free for shared code
117  * @hw:   pointer to the HW structure
118  * @mem:  ptr to mem struct to free
119  **/
120 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
121 {
122         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
123
124         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
125         mem->va = NULL;
126         mem->pa = 0;
127         mem->size = 0;
128
129         return 0;
130 }
131
132 /**
133  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
134  * @hw:   pointer to the HW structure
135  * @mem:  ptr to mem struct to fill out
136  * @size: size of memory requested
137  **/
138 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
139                              u32 size)
140 {
141         mem->size = size;
142         mem->va = kzalloc(size, GFP_KERNEL);
143
144         if (!mem->va)
145                 return -ENOMEM;
146
147         return 0;
148 }
149
150 /**
151  * i40e_free_virt_mem_d - OS specific memory free for shared code
152  * @hw:   pointer to the HW structure
153  * @mem:  ptr to mem struct to free
154  **/
155 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
156 {
157         /* it's ok to kfree a NULL pointer */
158         kfree(mem->va);
159         mem->va = NULL;
160         mem->size = 0;
161
162         return 0;
163 }
164
165 /**
166  * i40e_get_lump - find a lump of free generic resource
167  * @pf: board private structure
168  * @pile: the pile of resource to search
169  * @needed: the number of items needed
170  * @id: an owner id to stick on the items assigned
171  *
172  * Returns the base item index of the lump, or negative for error
173  *
174  * The search_hint trick and lack of advanced fit-finding only work
175  * because we're highly likely to have all the same size lump requests.
176  * Linear search time and any fragmentation should be minimal.
177  **/
178 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
179                          u16 needed, u16 id)
180 {
181         int ret = -ENOMEM;
182         int i, j;
183
184         if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
185                 dev_info(&pf->pdev->dev,
186                          "param err: pile=%p needed=%d id=0x%04x\n",
187                          pile, needed, id);
188                 return -EINVAL;
189         }
190
191         /* start the linear search with an imperfect hint */
192         i = pile->search_hint;
193         while (i < pile->num_entries) {
194                 /* skip already allocated entries */
195                 if (pile->list[i] & I40E_PILE_VALID_BIT) {
196                         i++;
197                         continue;
198                 }
199
200                 /* do we have enough in this lump? */
201                 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
202                         if (pile->list[i+j] & I40E_PILE_VALID_BIT)
203                                 break;
204                 }
205
206                 if (j == needed) {
207                         /* there was enough, so assign it to the requestor */
208                         for (j = 0; j < needed; j++)
209                                 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
210                         ret = i;
211                         pile->search_hint = i + j;
212                         break;
213                 } else {
214                         /* not enough, so skip over it and continue looking */
215                         i += j;
216                 }
217         }
218
219         return ret;
220 }
221
222 /**
223  * i40e_put_lump - return a lump of generic resource
224  * @pile: the pile of resource to search
225  * @index: the base item index
226  * @id: the owner id of the items assigned
227  *
228  * Returns the count of items in the lump
229  **/
230 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
231 {
232         int valid_id = (id | I40E_PILE_VALID_BIT);
233         int count = 0;
234         int i;
235
236         if (!pile || index >= pile->num_entries)
237                 return -EINVAL;
238
239         for (i = index;
240              i < pile->num_entries && pile->list[i] == valid_id;
241              i++) {
242                 pile->list[i] = 0;
243                 count++;
244         }
245
246         if (count && index < pile->search_hint)
247                 pile->search_hint = index;
248
249         return count;
250 }
251
252 /**
253  * i40e_find_vsi_from_id - searches for the vsi with the given id
254  * @pf - the pf structure to search for the vsi
255  * @id - id of the vsi it is searching for
256  **/
257 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
258 {
259         int i;
260
261         for (i = 0; i < pf->num_alloc_vsi; i++)
262                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
263                         return pf->vsi[i];
264
265         return NULL;
266 }
267
268 /**
269  * i40e_service_event_schedule - Schedule the service task to wake up
270  * @pf: board private structure
271  *
272  * If not already scheduled, this puts the task into the work queue
273  **/
274 static void i40e_service_event_schedule(struct i40e_pf *pf)
275 {
276         if (!test_bit(__I40E_DOWN, &pf->state) &&
277             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
278             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
279                 schedule_work(&pf->service_task);
280 }
281
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	/* if the last recovery attempt is more than 20s old, restart
	 * the escalation ladder from level 1
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	/* escalating recovery: VSI reinit -> PF reset -> core reset ->
	 * global reset; beyond that, give up and request the port down
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			/* can't take the reinit path here; let the
			 * service task do it instead
			 */
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	/* the service task acts on the reset bits requested above */
	i40e_service_event_schedule(pf);
	/* escalate if the next timeout arrives within the 20s window */
	pf->tx_timeout_recovery_level++;
}
336
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use value; also written to the ring's tail register
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
354
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* simple accessor; callers must not free the returned pointer */
	return &vsi->net_stats;
}
366
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided stats structure that is filled in and returned
 *
 * Returns the address of the device statistics structure.
 * Per-queue packet/byte totals are summed here from the ring counters
 * under their u64_stats seqcounts; the remaining error counters are
 * copied from the VSI stats, which are updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* nothing to report while the VSI is down or not yet set up */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* seqcount retry loop: re-read if a writer raced us */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* NOTE(review): assumes the rx ring struct is allocated
		 * immediately after its paired tx ring — confirm against
		 * the ring allocation code
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
436
437 /**
438  * i40e_vsi_reset_stats - Resets all stats of the given vsi
439  * @vsi: the VSI to have its stats reset
440  **/
441 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
442 {
443         struct rtnl_link_stats64 *ns;
444         int i;
445
446         if (!vsi)
447                 return;
448
449         ns = i40e_get_vsi_stats_struct(vsi);
450         memset(ns, 0, sizeof(*ns));
451         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
452         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
453         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
454         if (vsi->rx_rings && vsi->rx_rings[0]) {
455                 for (i = 0; i < vsi->num_queue_pairs; i++) {
456                         memset(&vsi->rx_rings[i]->stats, 0 ,
457                                sizeof(vsi->rx_rings[i]->stats));
458                         memset(&vsi->rx_rings[i]->rx_stats, 0 ,
459                                sizeof(vsi->rx_rings[i]->rx_stats));
460                         memset(&vsi->tx_rings[i]->stats, 0 ,
461                                sizeof(vsi->tx_rings[i]->stats));
462                         memset(&vsi->tx_rings[i]->tx_stats, 0,
463                                sizeof(vsi->tx_rings[i]->tx_stats));
464                 }
465         }
466         vsi->stat_offsets_loaded = false;
467 }
468
469 /**
470  * i40e_pf_reset_stats - Reset all of the stats for the given PF
471  * @pf: the PF to be reset
472  **/
473 void i40e_pf_reset_stats(struct i40e_pf *pf)
474 {
475         int i;
476
477         memset(&pf->stats, 0, sizeof(pf->stats));
478         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
479         pf->stat_offsets_loaded = false;
480
481         for (i = 0; i < I40E_MAX_VEB; i++) {
482                 if (pf->veb[i]) {
483                         memset(&pf->veb[i]->stats, 0,
484                                sizeof(pf->veb[i]->stats));
485                         memset(&pf->veb[i]->stats_offsets, 0,
486                                sizeof(pf->veb[i]->stats_offsets));
487                         pf->veb[i]->stat_offsets_loaded = false;
488                 }
489         }
490 }
491
492 /**
493  * i40e_stat_update48 - read and update a 48 bit stat from the chip
494  * @hw: ptr to the hardware info
495  * @hireg: the high 32 bit reg to read
496  * @loreg: the low 32 bit reg to read
497  * @offset_loaded: has the initial offset been loaded yet
498  * @offset: ptr to current offset value
499  * @stat: ptr to the stat
500  *
501  * Since the device stats are not reset at PFReset, they likely will not
502  * be zeroed when the driver starts.  We'll save the first values read
503  * and use them as offsets to be subtracted from the raw values in order
504  * to report stats that count from zero.  In the process, we also manage
505  * the potential roll-over.
506  **/
507 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
508                                bool offset_loaded, u64 *offset, u64 *stat)
509 {
510         u64 new_data;
511
512         if (hw->device_id == I40E_DEV_ID_QEMU) {
513                 new_data = rd32(hw, loreg);
514                 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
515         } else {
516                 new_data = rd64(hw, loreg);
517         }
518         if (!offset_loaded)
519                 *offset = new_data;
520         if (likely(new_data >= *offset))
521                 *stat = new_data - *offset;
522         else
523                 *stat = (new_data + ((u64)1 << 48)) - *offset;
524         *stat &= 0xFFFFFFFFFFFFULL;
525 }
526
527 /**
528  * i40e_stat_update32 - read and update a 32 bit stat from the chip
529  * @hw: ptr to the hardware info
530  * @reg: the hw reg to read
531  * @offset_loaded: has the initial offset been loaded yet
532  * @offset: ptr to current offset value
533  * @stat: ptr to the stat
534  **/
535 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
536                                bool offset_loaded, u64 *offset, u64 *stat)
537 {
538         u32 new_data;
539
540         new_data = rd32(hw, reg);
541         if (!offset_loaded)
542                 *offset = new_data;
543         if (likely(new_data >= *offset))
544                 *stat = (u32)(new_data - *offset);
545         else
546                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
547 }
548
549 /**
550  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
551  * @vsi: the VSI to be updated
552  **/
553 void i40e_update_eth_stats(struct i40e_vsi *vsi)
554 {
555         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
556         struct i40e_pf *pf = vsi->back;
557         struct i40e_hw *hw = &pf->hw;
558         struct i40e_eth_stats *oes;
559         struct i40e_eth_stats *es;     /* device's eth stats */
560
561         es = &vsi->eth_stats;
562         oes = &vsi->eth_stats_offsets;
563
564         /* Gather up the stats that the hw collects */
565         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
566                            vsi->stat_offsets_loaded,
567                            &oes->tx_errors, &es->tx_errors);
568         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
569                            vsi->stat_offsets_loaded,
570                            &oes->rx_discards, &es->rx_discards);
571         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
572                            vsi->stat_offsets_loaded,
573                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
574         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
575                            vsi->stat_offsets_loaded,
576                            &oes->tx_errors, &es->tx_errors);
577
578         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
579                            I40E_GLV_GORCL(stat_idx),
580                            vsi->stat_offsets_loaded,
581                            &oes->rx_bytes, &es->rx_bytes);
582         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
583                            I40E_GLV_UPRCL(stat_idx),
584                            vsi->stat_offsets_loaded,
585                            &oes->rx_unicast, &es->rx_unicast);
586         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
587                            I40E_GLV_MPRCL(stat_idx),
588                            vsi->stat_offsets_loaded,
589                            &oes->rx_multicast, &es->rx_multicast);
590         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
591                            I40E_GLV_BPRCL(stat_idx),
592                            vsi->stat_offsets_loaded,
593                            &oes->rx_broadcast, &es->rx_broadcast);
594
595         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
596                            I40E_GLV_GOTCL(stat_idx),
597                            vsi->stat_offsets_loaded,
598                            &oes->tx_bytes, &es->tx_bytes);
599         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
600                            I40E_GLV_UPTCL(stat_idx),
601                            vsi->stat_offsets_loaded,
602                            &oes->tx_unicast, &es->tx_unicast);
603         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
604                            I40E_GLV_MPTCL(stat_idx),
605                            vsi->stat_offsets_loaded,
606                            &oes->tx_multicast, &es->tx_multicast);
607         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
608                            I40E_GLV_BPTCL(stat_idx),
609                            vsi->stat_offsets_loaded,
610                            &oes->tx_broadcast, &es->tx_broadcast);
611         vsi->stat_offsets_loaded = true;
612 }
613
614 /**
615  * i40e_update_veb_stats - Update Switch component statistics
616  * @veb: the VEB being updated
617  **/
618 static void i40e_update_veb_stats(struct i40e_veb *veb)
619 {
620         struct i40e_pf *pf = veb->pf;
621         struct i40e_hw *hw = &pf->hw;
622         struct i40e_eth_stats *oes;
623         struct i40e_eth_stats *es;     /* device's eth stats */
624         int idx = 0;
625
626         idx = veb->stats_idx;
627         es = &veb->stats;
628         oes = &veb->stats_offsets;
629
630         /* Gather up the stats that the hw collects */
631         i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
632                            veb->stat_offsets_loaded,
633                            &oes->tx_discards, &es->tx_discards);
634         if (hw->revision_id > 0)
635                 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
636                                    veb->stat_offsets_loaded,
637                                    &oes->rx_unknown_protocol,
638                                    &es->rx_unknown_protocol);
639         i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
640                            veb->stat_offsets_loaded,
641                            &oes->rx_bytes, &es->rx_bytes);
642         i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
643                            veb->stat_offsets_loaded,
644                            &oes->rx_unicast, &es->rx_unicast);
645         i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
646                            veb->stat_offsets_loaded,
647                            &oes->rx_multicast, &es->rx_multicast);
648         i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
649                            veb->stat_offsets_loaded,
650                            &oes->rx_broadcast, &es->rx_broadcast);
651
652         i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
653                            veb->stat_offsets_loaded,
654                            &oes->tx_bytes, &es->tx_bytes);
655         i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
656                            veb->stat_offsets_loaded,
657                            &oes->tx_unicast, &es->tx_unicast);
658         i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
659                            veb->stat_offsets_loaded,
660                            &oes->tx_multicast, &es->tx_multicast);
661         i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
662                            veb->stat_offsets_loaded,
663                            &oes->tx_broadcast, &es->tx_broadcast);
664         veb->stat_offsets_loaded = true;
665 }
666
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * No-op for non-FCoE VSIs; otherwise reads the global FCoE counters at
 * the PF's FCoE stat index into vsi->fcoe_stats, baselined against
 * vsi->fcoe_stats_offsets.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;	/* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* per-PF slot within the global FCoE counter space */
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
716 /**
717  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
718  * @pf: the corresponding PF
719  *
720  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
721  **/
722 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
723 {
724         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
725         struct i40e_hw_port_stats *nsd = &pf->stats;
726         struct i40e_hw *hw = &pf->hw;
727         u64 xoff = 0;
728         u16 i, v;
729
730         if ((hw->fc.current_mode != I40E_FC_FULL) &&
731             (hw->fc.current_mode != I40E_FC_RX_PAUSE))
732                 return;
733
734         xoff = nsd->link_xoff_rx;
735         i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
736                            pf->stat_offsets_loaded,
737                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
738
739         /* No new LFC xoff rx */
740         if (!(nsd->link_xoff_rx - xoff))
741                 return;
742
743         /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
744         for (v = 0; v < pf->num_alloc_vsi; v++) {
745                 struct i40e_vsi *vsi = pf->vsi[v];
746
747                 if (!vsi || !vsi->tx_rings[0])
748                         continue;
749
750                 for (i = 0; i < vsi->num_queue_pairs; i++) {
751                         struct i40e_ring *ring = vsi->tx_rings[i];
752                         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
753                 }
754         }
755 }
756
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 *
 * When PFC is enabled there is one XOFF counter per user priority;
 * otherwise the single link-level counter is used instead (handled by
 * i40e_update_link_xoff_rx()).  For every traffic class that saw new
 * XOFF frames, the Tx hang-check "armed" bit is cleared on the rings
 * mapped to that TC, since an XOFF-induced stall is not a real Tx hang.
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* Collect Link XOFF stats when PFC is disabled */
	if (!dcb_cfg->pfc.pfcenable) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* remember the previous value so a delta can be detected */
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		/* skip unallocated VSIs and VSIs without rings set up yet */
		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			/* only disarm rings on a TC that received XOFF */
			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
813
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* don't touch the stats while the VSI is down or a reconfig
	 * is in flight
	 */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring; single read of the ring pointer since
		 * it may be swapped out from under us (hence the RCU lock)
		 */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* u64_stats seqcount retry loop: re-read if a writer
		 * updated the counters mid-read (matters on 32-bit)
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
913
914 /**
915  * i40e_update_pf_stats - Update the PF statistics counters.
916  * @pf: the PF to be updated
917  **/
918 static void i40e_update_pf_stats(struct i40e_pf *pf)
919 {
920         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
921         struct i40e_hw_port_stats *nsd = &pf->stats;
922         struct i40e_hw *hw = &pf->hw;
923         u32 val;
924         int i;
925
926         i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
927                            I40E_GLPRT_GORCL(hw->port),
928                            pf->stat_offsets_loaded,
929                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
930         i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
931                            I40E_GLPRT_GOTCL(hw->port),
932                            pf->stat_offsets_loaded,
933                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
934         i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
935                            pf->stat_offsets_loaded,
936                            &osd->eth.rx_discards,
937                            &nsd->eth.rx_discards);
938         i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
939                            I40E_GLPRT_UPRCL(hw->port),
940                            pf->stat_offsets_loaded,
941                            &osd->eth.rx_unicast,
942                            &nsd->eth.rx_unicast);
943         i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
944                            I40E_GLPRT_MPRCL(hw->port),
945                            pf->stat_offsets_loaded,
946                            &osd->eth.rx_multicast,
947                            &nsd->eth.rx_multicast);
948         i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
949                            I40E_GLPRT_BPRCL(hw->port),
950                            pf->stat_offsets_loaded,
951                            &osd->eth.rx_broadcast,
952                            &nsd->eth.rx_broadcast);
953         i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
954                            I40E_GLPRT_UPTCL(hw->port),
955                            pf->stat_offsets_loaded,
956                            &osd->eth.tx_unicast,
957                            &nsd->eth.tx_unicast);
958         i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
959                            I40E_GLPRT_MPTCL(hw->port),
960                            pf->stat_offsets_loaded,
961                            &osd->eth.tx_multicast,
962                            &nsd->eth.tx_multicast);
963         i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
964                            I40E_GLPRT_BPTCL(hw->port),
965                            pf->stat_offsets_loaded,
966                            &osd->eth.tx_broadcast,
967                            &nsd->eth.tx_broadcast);
968
969         i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
970                            pf->stat_offsets_loaded,
971                            &osd->tx_dropped_link_down,
972                            &nsd->tx_dropped_link_down);
973
974         i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
975                            pf->stat_offsets_loaded,
976                            &osd->crc_errors, &nsd->crc_errors);
977
978         i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
979                            pf->stat_offsets_loaded,
980                            &osd->illegal_bytes, &nsd->illegal_bytes);
981
982         i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
983                            pf->stat_offsets_loaded,
984                            &osd->mac_local_faults,
985                            &nsd->mac_local_faults);
986         i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
987                            pf->stat_offsets_loaded,
988                            &osd->mac_remote_faults,
989                            &nsd->mac_remote_faults);
990
991         i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
992                            pf->stat_offsets_loaded,
993                            &osd->rx_length_errors,
994                            &nsd->rx_length_errors);
995
996         i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
997                            pf->stat_offsets_loaded,
998                            &osd->link_xon_rx, &nsd->link_xon_rx);
999         i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1000                            pf->stat_offsets_loaded,
1001                            &osd->link_xon_tx, &nsd->link_xon_tx);
1002         i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
1003         i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1004                            pf->stat_offsets_loaded,
1005                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
1006
1007         for (i = 0; i < 8; i++) {
1008                 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1009                                    pf->stat_offsets_loaded,
1010                                    &osd->priority_xon_rx[i],
1011                                    &nsd->priority_xon_rx[i]);
1012                 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1013                                    pf->stat_offsets_loaded,
1014                                    &osd->priority_xon_tx[i],
1015                                    &nsd->priority_xon_tx[i]);
1016                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1017                                    pf->stat_offsets_loaded,
1018                                    &osd->priority_xoff_tx[i],
1019                                    &nsd->priority_xoff_tx[i]);
1020                 i40e_stat_update32(hw,
1021                                    I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1022                                    pf->stat_offsets_loaded,
1023                                    &osd->priority_xon_2_xoff[i],
1024                                    &nsd->priority_xon_2_xoff[i]);
1025         }
1026
1027         i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1028                            I40E_GLPRT_PRC64L(hw->port),
1029                            pf->stat_offsets_loaded,
1030                            &osd->rx_size_64, &nsd->rx_size_64);
1031         i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1032                            I40E_GLPRT_PRC127L(hw->port),
1033                            pf->stat_offsets_loaded,
1034                            &osd->rx_size_127, &nsd->rx_size_127);
1035         i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1036                            I40E_GLPRT_PRC255L(hw->port),
1037                            pf->stat_offsets_loaded,
1038                            &osd->rx_size_255, &nsd->rx_size_255);
1039         i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1040                            I40E_GLPRT_PRC511L(hw->port),
1041                            pf->stat_offsets_loaded,
1042                            &osd->rx_size_511, &nsd->rx_size_511);
1043         i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1044                            I40E_GLPRT_PRC1023L(hw->port),
1045                            pf->stat_offsets_loaded,
1046                            &osd->rx_size_1023, &nsd->rx_size_1023);
1047         i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1048                            I40E_GLPRT_PRC1522L(hw->port),
1049                            pf->stat_offsets_loaded,
1050                            &osd->rx_size_1522, &nsd->rx_size_1522);
1051         i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1052                            I40E_GLPRT_PRC9522L(hw->port),
1053                            pf->stat_offsets_loaded,
1054                            &osd->rx_size_big, &nsd->rx_size_big);
1055
1056         i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1057                            I40E_GLPRT_PTC64L(hw->port),
1058                            pf->stat_offsets_loaded,
1059                            &osd->tx_size_64, &nsd->tx_size_64);
1060         i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1061                            I40E_GLPRT_PTC127L(hw->port),
1062                            pf->stat_offsets_loaded,
1063                            &osd->tx_size_127, &nsd->tx_size_127);
1064         i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1065                            I40E_GLPRT_PTC255L(hw->port),
1066                            pf->stat_offsets_loaded,
1067                            &osd->tx_size_255, &nsd->tx_size_255);
1068         i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1069                            I40E_GLPRT_PTC511L(hw->port),
1070                            pf->stat_offsets_loaded,
1071                            &osd->tx_size_511, &nsd->tx_size_511);
1072         i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1073                            I40E_GLPRT_PTC1023L(hw->port),
1074                            pf->stat_offsets_loaded,
1075                            &osd->tx_size_1023, &nsd->tx_size_1023);
1076         i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1077                            I40E_GLPRT_PTC1522L(hw->port),
1078                            pf->stat_offsets_loaded,
1079                            &osd->tx_size_1522, &nsd->tx_size_1522);
1080         i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1081                            I40E_GLPRT_PTC9522L(hw->port),
1082                            pf->stat_offsets_loaded,
1083                            &osd->tx_size_big, &nsd->tx_size_big);
1084
1085         i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1086                            pf->stat_offsets_loaded,
1087                            &osd->rx_undersize, &nsd->rx_undersize);
1088         i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1089                            pf->stat_offsets_loaded,
1090                            &osd->rx_fragments, &nsd->rx_fragments);
1091         i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1092                            pf->stat_offsets_loaded,
1093                            &osd->rx_oversize, &nsd->rx_oversize);
1094         i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1095                            pf->stat_offsets_loaded,
1096                            &osd->rx_jabber, &nsd->rx_jabber);
1097
1098         /* FDIR stats */
1099         i40e_stat_update32(hw,
1100                            I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1101                            pf->stat_offsets_loaded,
1102                            &osd->fd_atr_match, &nsd->fd_atr_match);
1103         i40e_stat_update32(hw,
1104                            I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1105                            pf->stat_offsets_loaded,
1106                            &osd->fd_sb_match, &nsd->fd_sb_match);
1107         i40e_stat_update32(hw,
1108                       I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1109                       pf->stat_offsets_loaded,
1110                       &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1111
1112         val = rd32(hw, I40E_PRTPM_EEE_STAT);
1113         nsd->tx_lpi_status =
1114                        (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1115                         I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1116         nsd->rx_lpi_status =
1117                        (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1118                         I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1119         i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1120                            pf->stat_offsets_loaded,
1121                            &osd->tx_lpi_count, &nsd->tx_lpi_count);
1122         i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1123                            pf->stat_offsets_loaded,
1124                            &osd->rx_lpi_count, &nsd->rx_lpi_count);
1125
1126         pf->stat_offsets_loaded = true;
1127 }
1128
1129 /**
1130  * i40e_update_stats - Update the various statistics counters.
1131  * @vsi: the VSI to be updated
1132  *
1133  * Update the various stats for this VSI and its related entities.
1134  **/
1135 void i40e_update_stats(struct i40e_vsi *vsi)
1136 {
1137         struct i40e_pf *pf = vsi->back;
1138
1139         if (vsi == pf->vsi[pf->lan_vsi])
1140                 i40e_update_pf_stats(pf);
1141
1142         i40e_update_vsi_stats(vsi);
1143 #ifdef I40E_FCOE
1144         i40e_update_fcoe_stats(vsi);
1145 #endif
1146 }
1147
1148 /**
1149  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1150  * @vsi: the VSI to be searched
1151  * @macaddr: the MAC address
1152  * @vlan: the vlan
1153  * @is_vf: make sure its a VF filter, else doesn't matter
1154  * @is_netdev: make sure its a netdev filter, else doesn't matter
1155  *
1156  * Returns ptr to the filter object or NULL
1157  **/
1158 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1159                                                 u8 *macaddr, s16 vlan,
1160                                                 bool is_vf, bool is_netdev)
1161 {
1162         struct i40e_mac_filter *f;
1163
1164         if (!vsi || !macaddr)
1165                 return NULL;
1166
1167         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1168                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1169                     (vlan == f->vlan)    &&
1170                     (!is_vf || f->is_vf) &&
1171                     (!is_netdev || f->is_netdev))
1172                         return f;
1173         }
1174         return NULL;
1175 }
1176
1177 /**
1178  * i40e_find_mac - Find a mac addr in the macvlan filters list
1179  * @vsi: the VSI to be searched
1180  * @macaddr: the MAC address we are searching for
1181  * @is_vf: make sure its a VF filter, else doesn't matter
1182  * @is_netdev: make sure its a netdev filter, else doesn't matter
1183  *
1184  * Returns the first filter with the provided MAC address or NULL if
1185  * MAC address was not found
1186  **/
1187 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1188                                       bool is_vf, bool is_netdev)
1189 {
1190         struct i40e_mac_filter *f;
1191
1192         if (!vsi || !macaddr)
1193                 return NULL;
1194
1195         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1196                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1197                     (!is_vf || f->is_vf) &&
1198                     (!is_netdev || f->is_netdev))
1199                         return f;
1200         }
1201         return NULL;
1202 }
1203
1204 /**
1205  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1206  * @vsi: the VSI to be searched
1207  *
1208  * Returns true if VSI is in vlan mode or false otherwise
1209  **/
1210 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1211 {
1212         struct i40e_mac_filter *f;
1213
1214         /* Only -1 for all the filters denotes not in vlan mode
1215          * so we have to go through all the list in order to make sure
1216          */
1217         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1218                 if (f->vlan >= 0)
1219                         return true;
1220         }
1221
1222         return false;
1223 }
1224
1225 /**
1226  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1227  * @vsi: the VSI to be searched
1228  * @macaddr: the mac address to be filtered
1229  * @is_vf: true if it is a VF
1230  * @is_netdev: true if it is a netdev
1231  *
1232  * Goes through all the macvlan filters and adds a
1233  * macvlan filter for each unique vlan that already exists
1234  *
1235  * Returns first filter found on success, else NULL
1236  **/
1237 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1238                                              bool is_vf, bool is_netdev)
1239 {
1240         struct i40e_mac_filter *f;
1241
1242         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1243                 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1244                                       is_vf, is_netdev)) {
1245                         if (!i40e_add_filter(vsi, macaddr, f->vlan,
1246                                              is_vf, is_netdev))
1247                                 return NULL;
1248                 }
1249         }
1250
1251         return list_first_entry_or_null(&vsi->mac_filter_list,
1252                                         struct i40e_mac_filter, list);
1253 }
1254
1255 /**
1256  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1257  * @vsi: the PF Main VSI - inappropriate for any other VSI
1258  * @macaddr: the MAC address
1259  *
1260  * Some older firmware configurations set up a default promiscuous VLAN
1261  * filter that needs to be removed.
1262  **/
1263 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1264 {
1265         struct i40e_aqc_remove_macvlan_element_data element;
1266         struct i40e_pf *pf = vsi->back;
1267         i40e_status aq_ret;
1268
1269         /* Only appropriate for the PF main VSI */
1270         if (vsi->type != I40E_VSI_MAIN)
1271                 return -EINVAL;
1272
1273         memset(&element, 0, sizeof(element));
1274         ether_addr_copy(element.mac_addr, macaddr);
1275         element.vlan_tag = 0;
1276         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1277                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1278         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1279         if (aq_ret)
1280                 return -ENOENT;
1281
1282         return 0;
1283 }
1284
1285 /**
1286  * i40e_add_filter - Add a mac/vlan filter to the VSI
1287  * @vsi: the VSI to be searched
1288  * @macaddr: the MAC address
1289  * @vlan: the vlan
1290  * @is_vf: make sure its a VF filter, else doesn't matter
1291  * @is_netdev: make sure its a netdev filter, else doesn't matter
1292  *
1293  * Returns ptr to the filter object or NULL when no memory available.
1294  **/
1295 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1296                                         u8 *macaddr, s16 vlan,
1297                                         bool is_vf, bool is_netdev)
1298 {
1299         struct i40e_mac_filter *f;
1300
1301         if (!vsi || !macaddr)
1302                 return NULL;
1303
1304         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1305         if (!f) {
1306                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1307                 if (!f)
1308                         goto add_filter_out;
1309
1310                 ether_addr_copy(f->macaddr, macaddr);
1311                 f->vlan = vlan;
1312                 f->changed = true;
1313
1314                 INIT_LIST_HEAD(&f->list);
1315                 list_add(&f->list, &vsi->mac_filter_list);
1316         }
1317
1318         /* increment counter and add a new flag if needed */
1319         if (is_vf) {
1320                 if (!f->is_vf) {
1321                         f->is_vf = true;
1322                         f->counter++;
1323                 }
1324         } else if (is_netdev) {
1325                 if (!f->is_netdev) {
1326                         f->is_netdev = true;
1327                         f->counter++;
1328                 }
1329         } else {
1330                 f->counter++;
1331         }
1332
1333         /* changed tells sync_filters_subtask to
1334          * push the filter down to the firmware
1335          */
1336         if (f->changed) {
1337                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1338                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1339         }
1340
1341 add_filter_out:
1342         return f;
1343 }
1344
1345 /**
1346  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1347  * @vsi: the VSI to be searched
1348  * @macaddr: the MAC address
1349  * @vlan: the vlan
1350  * @is_vf: make sure it's a VF filter, else doesn't matter
1351  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1352  **/
1353 void i40e_del_filter(struct i40e_vsi *vsi,
1354                      u8 *macaddr, s16 vlan,
1355                      bool is_vf, bool is_netdev)
1356 {
1357         struct i40e_mac_filter *f;
1358
1359         if (!vsi || !macaddr)
1360                 return;
1361
1362         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1363         if (!f || f->counter == 0)
1364                 return;
1365
1366         if (is_vf) {
1367                 if (f->is_vf) {
1368                         f->is_vf = false;
1369                         f->counter--;
1370                 }
1371         } else if (is_netdev) {
1372                 if (f->is_netdev) {
1373                         f->is_netdev = false;
1374                         f->counter--;
1375                 }
1376         } else {
1377                 /* make sure we don't remove a filter in use by VF or netdev */
1378                 int min_f = 0;
1379                 min_f += (f->is_vf ? 1 : 0);
1380                 min_f += (f->is_netdev ? 1 : 0);
1381
1382                 if (f->counter > min_f)
1383                         f->counter--;
1384         }
1385
1386         /* counter == 0 tells sync_filters_subtask to
1387          * remove the filter from the firmware's list
1388          */
1389         if (f->counter == 0) {
1390                 f->changed = true;
1391                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1392                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1393         }
1394 }
1395
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* nothing to do if the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	/* refuse address changes while the device is down or resetting */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		/* persist the locally administered address (LAA) in the
		 * device so it survives wake-on-LAN
		 */
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	/* remove the old address: the hardware default MAC is handled via
	 * a direct perfect-match AQ filter, any other address lives in the
	 * driver's filter list
	 */
	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	/* add the new address the same way; a non-default address is
	 * flagged is_laa (locally administered address)
	 */
	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	/* push the updated filter list to the firmware */
	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
1479
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure (tc_mapping[] and queue_mapping[] are filled in)
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.  Also updates
 * vsi->tc_config and vsi->num_queue_pairs as a side effect.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                              struct i40e_vsi_context *ctxt,
                              u8 enabled_tc,
                              bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
#endif
{
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
        u8 netdev_tc = 0;
        u16 numtc = 0;
        u16 qcount;
        u8 offset;
        u16 qmap;
        int i;
        u16 num_tc_qps = 0;

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;

        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if (enabled_tc & (1 << i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
                        numtc = 1;
                }
        } else {
                /* At least TC0 is enabled in case of non-DCB case */
                numtc = 1;
        }

        vsi->tc_config.numtc = numtc;
        /* enabled_tc == 0 means "just TC0" (bit 0) */
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        /* Number of queues per enabled TC */
        /* In MFP case we can have a much lower count of MSIx
         * vectors available and so we need to lower the used
         * q count.
         */
        qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
        num_tc_qps = qcount / numtc;
        num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
                        int pow, num_qps;

                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                                /* the main LAN VSI is further capped by the
                                 * configured RSS size
                                 */
                                qcount = min_t(int, pf->rss_size, num_tc_qps);
                                break;
#ifdef I40E_FCOE
                        case I40E_VSI_FCOE:
                                qcount = num_tc_qps;
                                break;
#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
                        default:
                                /* these VSI types only ever use TC0 */
                                qcount = num_tc_qps;
                                WARN_ON(i != 0);
                                break;
                        }
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;

                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && ((1 << pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }

                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                        /* qmap encodes the TC's first queue index plus the
                         * log2 of its (power-of-2 rounded) queue count
                         */
                        qmap =
                            (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                            (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

                        offset += qcount;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;

                        qmap = 0;
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
        if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else
                        vsi->num_queue_pairs = pf->num_lan_msix;
        }

        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

                ctxt->info.up_enable_bits = enabled_tc;
        }
        if (vsi->type == I40E_VSI_SRIOV) {
                /* VF queues may be scattered, so map each one explicitly */
                ctxt->info.mapping_flags |=
                                     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->num_queue_pairs; i++)
                        ctxt->info.queue_mapping[i] =
                                               cpu_to_le16(vsi->base_queue + i);
        } else {
                /* contiguous mapping only needs the base queue */
                ctxt->info.mapping_flags |=
                                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        }
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1622
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Reconciles the driver's MAC filter list with the netdev's unicast and
 * multicast address lists: adds filters for new addresses, removes
 * filters for addresses no longer present, and flags the VSI for a
 * filter sync if other netdev flags changed.
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_mac_filter *f, *ftmp;
        struct i40e_vsi *vsi = np->vsi;
        struct netdev_hw_addr *uca;
        struct netdev_hw_addr *mca;
        struct netdev_hw_addr *ha;

        /* add addr if not already in the filter list */
        netdev_for_each_uc_addr(uca, netdev) {
                if (!i40e_find_mac(vsi, uca->addr, false, true)) {
                        if (i40e_is_vsi_in_vlan(vsi))
                                i40e_put_mac_in_vlan(vsi, uca->addr,
                                                     false, true);
                        else
                                i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
                                                false, true);
                }
        }

        netdev_for_each_mc_addr(mca, netdev) {
                if (!i40e_find_mac(vsi, mca->addr, false, true)) {
                        if (i40e_is_vsi_in_vlan(vsi))
                                i40e_put_mac_in_vlan(vsi, mca->addr,
                                                     false, true);
                        else
                                i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
                                                false, true);
                }
        }

        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                bool found = false;

                /* only netdev-owned filters are candidates for removal */
                if (!f->is_netdev)
                        continue;

                if (is_multicast_ether_addr(f->macaddr)) {
                        netdev_for_each_mc_addr(mca, netdev) {
                                if (ether_addr_equal(mca->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }
                } else {
                        /* unicast filters may match either the uc list or
                         * the device's own address list
                         */
                        netdev_for_each_uc_addr(uca, netdev) {
                                if (ether_addr_equal(uca->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }

                        for_each_dev_addr(netdev, ha) {
                                if (ether_addr_equal(ha->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }
                }
                if (!found)
                        i40e_del_filter(
                           vsi, f->macaddr, I40E_VLAN_ANY, false, true);
        }

        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
                /* defer the actual HW update to the filter-sync subtask */
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
}
1703
1704 /**
1705  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1706  * @vsi: ptr to the VSI
1707  *
1708  * Push any outstanding VSI filter changes through the AdminQ.
1709  *
1710  * Returns 0 or error value
1711  **/
1712 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1713 {
1714         struct i40e_mac_filter *f, *ftmp;
1715         bool promisc_forced_on = false;
1716         bool add_happened = false;
1717         int filter_list_len = 0;
1718         u32 changed_flags = 0;
1719         i40e_status aq_ret = 0;
1720         struct i40e_pf *pf;
1721         int num_add = 0;
1722         int num_del = 0;
1723         u16 cmd_flags;
1724
1725         /* empty array typed pointers, kcalloc later */
1726         struct i40e_aqc_add_macvlan_element_data *add_list;
1727         struct i40e_aqc_remove_macvlan_element_data *del_list;
1728
1729         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1730                 usleep_range(1000, 2000);
1731         pf = vsi->back;
1732
1733         if (vsi->netdev) {
1734                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1735                 vsi->current_netdev_flags = vsi->netdev->flags;
1736         }
1737
1738         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1739                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1740
1741                 filter_list_len = pf->hw.aq.asq_buf_size /
1742                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1743                 del_list = kcalloc(filter_list_len,
1744                             sizeof(struct i40e_aqc_remove_macvlan_element_data),
1745                             GFP_KERNEL);
1746                 if (!del_list)
1747                         return -ENOMEM;
1748
1749                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1750                         if (!f->changed)
1751                                 continue;
1752
1753                         if (f->counter != 0)
1754                                 continue;
1755                         f->changed = false;
1756                         cmd_flags = 0;
1757
1758                         /* add to delete list */
1759                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1760                         del_list[num_del].vlan_tag =
1761                                 cpu_to_le16((u16)(f->vlan ==
1762                                             I40E_VLAN_ANY ? 0 : f->vlan));
1763
1764                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1765                         del_list[num_del].flags = cmd_flags;
1766                         num_del++;
1767
1768                         /* unlink from filter list */
1769                         list_del(&f->list);
1770                         kfree(f);
1771
1772                         /* flush a full buffer */
1773                         if (num_del == filter_list_len) {
1774                                 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1775                                             vsi->seid, del_list, num_del,
1776                                             NULL);
1777                                 num_del = 0;
1778                                 memset(del_list, 0, sizeof(*del_list));
1779
1780                                 if (aq_ret &&
1781                                     pf->hw.aq.asq_last_status !=
1782                                                               I40E_AQ_RC_ENOENT)
1783                                         dev_info(&pf->pdev->dev,
1784                                                  "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1785                                                  aq_ret,
1786                                                  pf->hw.aq.asq_last_status);
1787                         }
1788                 }
1789                 if (num_del) {
1790                         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1791                                                      del_list, num_del, NULL);
1792                         num_del = 0;
1793
1794                         if (aq_ret &&
1795                             pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1796                                 dev_info(&pf->pdev->dev,
1797                                          "ignoring delete macvlan error, err %d, aq_err %d\n",
1798                                          aq_ret, pf->hw.aq.asq_last_status);
1799                 }
1800
1801                 kfree(del_list);
1802                 del_list = NULL;
1803
1804                 /* do all the adds now */
1805                 filter_list_len = pf->hw.aq.asq_buf_size /
1806                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1807                 add_list = kcalloc(filter_list_len,
1808                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1809                                GFP_KERNEL);
1810                 if (!add_list)
1811                         return -ENOMEM;
1812
1813                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1814                         if (!f->changed)
1815                                 continue;
1816
1817                         if (f->counter == 0)
1818                                 continue;
1819                         f->changed = false;
1820                         add_happened = true;
1821                         cmd_flags = 0;
1822
1823                         /* add to add array */
1824                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1825                         add_list[num_add].vlan_tag =
1826                                 cpu_to_le16(
1827                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1828                         add_list[num_add].queue_number = 0;
1829
1830                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1831                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
1832                         num_add++;
1833
1834                         /* flush a full buffer */
1835                         if (num_add == filter_list_len) {
1836                                 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1837                                                              add_list, num_add,
1838                                                              NULL);
1839                                 num_add = 0;
1840
1841                                 if (aq_ret)
1842                                         break;
1843                                 memset(add_list, 0, sizeof(*add_list));
1844                         }
1845                 }
1846                 if (num_add) {
1847                         aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1848                                                      add_list, num_add, NULL);
1849                         num_add = 0;
1850                 }
1851                 kfree(add_list);
1852                 add_list = NULL;
1853
1854                 if (add_happened && aq_ret &&
1855                     pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1856                         dev_info(&pf->pdev->dev,
1857                                  "add filter failed, err %d, aq_err %d\n",
1858                                  aq_ret, pf->hw.aq.asq_last_status);
1859                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1860                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1861                                       &vsi->state)) {
1862                                 promisc_forced_on = true;
1863                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1864                                         &vsi->state);
1865                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1866                         }
1867                 }
1868         }
1869
1870         /* check for changes in promiscuous modes */
1871         if (changed_flags & IFF_ALLMULTI) {
1872                 bool cur_multipromisc;
1873                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1874                 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1875                                                                vsi->seid,
1876                                                                cur_multipromisc,
1877                                                                NULL);
1878                 if (aq_ret)
1879                         dev_info(&pf->pdev->dev,
1880                                  "set multi promisc failed, err %d, aq_err %d\n",
1881                                  aq_ret, pf->hw.aq.asq_last_status);
1882         }
1883         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1884                 bool cur_promisc;
1885                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1886                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1887                                         &vsi->state));
1888                 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1889                                                              vsi->seid,
1890                                                              cur_promisc, NULL);
1891                 if (aq_ret)
1892                         dev_info(&pf->pdev->dev,
1893                                  "set uni promisc failed, err %d, aq_err %d\n",
1894                                  aq_ret, pf->hw.aq.asq_last_status);
1895                 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1896                                                    vsi->seid,
1897                                                    cur_promisc, NULL);
1898                 if (aq_ret)
1899                         dev_info(&pf->pdev->dev,
1900                                  "set brdcast promisc failed, err %d, aq_err %d\n",
1901                                  aq_ret, pf->hw.aq.asq_last_status);
1902         }
1903
1904         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1905         return 0;
1906 }
1907
1908 /**
1909  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1910  * @pf: board private structure
1911  **/
1912 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1913 {
1914         int v;
1915
1916         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1917                 return;
1918         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1919
1920         for (v = 0; v < pf->num_alloc_vsi; v++) {
1921                 if (pf->vsi[v] &&
1922                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1923                         i40e_sync_vsi_filters(pf->vsi[v]);
1924         }
1925 }
1926
1927 /**
1928  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1929  * @netdev: network interface device structure
1930  * @new_mtu: new value for maximum frame size
1931  *
1932  * Returns 0 on success, negative on failure
1933  **/
1934 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1935 {
1936         struct i40e_netdev_priv *np = netdev_priv(netdev);
1937         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1938         struct i40e_vsi *vsi = np->vsi;
1939
1940         /* MTU < 68 is an error and causes problems on some kernels */
1941         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1942                 return -EINVAL;
1943
1944         netdev_info(netdev, "changing MTU from %d to %d\n",
1945                     netdev->mtu, new_mtu);
1946         netdev->mtu = new_mtu;
1947         if (netif_running(netdev))
1948                 i40e_vsi_reinit_locked(vsi);
1949
1950         return 0;
1951 }
1952
1953 /**
1954  * i40e_ioctl - Access the hwtstamp interface
1955  * @netdev: network interface device structure
1956  * @ifr: interface request data
1957  * @cmd: ioctl command
1958  **/
1959 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1960 {
1961         struct i40e_netdev_priv *np = netdev_priv(netdev);
1962         struct i40e_pf *pf = np->vsi->back;
1963
1964         switch (cmd) {
1965         case SIOCGHWTSTAMP:
1966                 return i40e_ptp_get_ts_config(pf, ifr);
1967         case SIOCSHWTSTAMP:
1968                 return i40e_ptp_set_ts_config(pf, ifr);
1969         default:
1970                 return -EOPNOTSUPP;
1971         }
1972 }
1973
1974 /**
1975  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1976  * @vsi: the vsi being adjusted
1977  **/
1978 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1979 {
1980         struct i40e_vsi_context ctxt;
1981         i40e_status ret;
1982
1983         if ((vsi->info.valid_sections &
1984              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1985             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1986                 return;  /* already enabled */
1987
1988         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1989         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1990                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1991
1992         ctxt.seid = vsi->seid;
1993         ctxt.info = vsi->info;
1994         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1995         if (ret) {
1996                 dev_info(&vsi->back->pdev->dev,
1997                          "%s: update vsi failed, aq_err=%d\n",
1998                          __func__, vsi->back->hw.aq.asq_last_status);
1999         }
2000 }
2001
2002 /**
2003  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2004  * @vsi: the vsi being adjusted
2005  **/
2006 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2007 {
2008         struct i40e_vsi_context ctxt;
2009         i40e_status ret;
2010
2011         if ((vsi->info.valid_sections &
2012              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2013             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2014              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2015                 return;  /* already disabled */
2016
2017         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2018         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2019                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2020
2021         ctxt.seid = vsi->seid;
2022         ctxt.info = vsi->info;
2023         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2024         if (ret) {
2025                 dev_info(&vsi->back->pdev->dev,
2026                          "%s: update vsi failed, aq_err=%d\n",
2027                          __func__, vsi->back->hw.aq.asq_last_status);
2028         }
2029 }
2030
2031 /**
2032  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2033  * @netdev: network interface to be adjusted
2034  * @features: netdev features to test if VLAN offload is enabled or not
2035  **/
2036 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2037 {
2038         struct i40e_netdev_priv *np = netdev_priv(netdev);
2039         struct i40e_vsi *vsi = np->vsi;
2040
2041         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2042                 i40e_vlan_stripping_enable(vsi);
2043         else
2044                 i40e_vlan_stripping_disable(vsi);
2045 }
2046
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a filter for @vid on every MAC address known to the VSI (plus
 * the netdev's own address), then converts any pre-existing "any vlan"
 * (-1) filters to vlan-0 filters so tagged and untagged traffic are
 * both accepted but not arbitrary tags.
 *
 * Returns 0 on success, -ENOMEM if a filter could not be allocated,
 * or the result of the final filter sync.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
        struct i40e_mac_filter *f, *add_f;
        bool is_netdev, is_vf;

        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(vsi->netdev);

        /* cover the netdev's own MAC first */
        if (is_netdev) {
                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
                                        is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, vsi->netdev->dev_addr);
                        return -ENOMEM;
                }
        }

        /* then every MAC already on the VSI's filter list */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, f->macaddr);
                        return -ENOMEM;
                }
        }

        /* Now if we add a vlan tag, make sure to check if it is the first
         * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
         * with 0, so we now accept untagged and specified tagged traffic
         * (and not any taged and untagged)
         */
        if (vid > 0) {
                if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
                                                  I40E_VLAN_ANY,
                                                  is_vf, is_netdev)) {
                        /* replace the -1 filter with a vlan-0 filter;
                         * delete-then-add ordering is deliberate
                         */
                        i40e_del_filter(vsi, vsi->netdev->dev_addr,
                                        I40E_VLAN_ANY, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
                                                is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter 0 for %pM\n",
                                         vsi->netdev->dev_addr);
                                return -ENOMEM;
                        }
                }
        }

        /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
        if (vid > 0 && !vsi->info.pvid) {
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                             is_vf, is_netdev)) {
                                i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                                is_vf, is_netdev);
                                add_f = i40e_add_filter(vsi, f->macaddr,
                                                        0, is_vf, is_netdev);
                                if (!add_f) {
                                        dev_info(&vsi->back->pdev->dev,
                                                 "Could not add filter 0 for %pM\n",
                                                 f->macaddr);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        /* skip the HW sync while the device is down or resetting */
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;

        return i40e_sync_vsi_filters(vsi);
}
2128
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Removes the @vid filter from every MAC on the VSI.  If no tagged
 * filters remain afterwards, the vlan-0 filters are converted back to
 * "any vlan" (-1) filters so all traffic is accepted again.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
        struct net_device *netdev = vsi->netdev;
        struct i40e_mac_filter *f, *add_f;
        bool is_vf, is_netdev;
        int filter_count = 0;

        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(netdev);

        if (is_netdev)
                i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

        list_for_each_entry(f, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

        /* go through all the filters for this VSI and if there is only
         * vid == 0 it means there are no other filters, so vid 0 must
         * be replaced with -1. This signifies that we should from now
         * on accept any traffic (with any tag present, or untagged)
         */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                /* NOTE(review): a tagged filter on the netdev's own MAC is
                 * counted twice here (once in each branch) — confirm this
                 * weighting is intentional
                 */
                if (is_netdev) {
                        if (f->vlan &&
                            ether_addr_equal(netdev->dev_addr, f->macaddr))
                                filter_count++;
                }

                if (f->vlan)
                        filter_count++;
        }

        /* no tagged filters left: restore the -1 filter on the netdev MAC */
        if (!filter_count && is_netdev) {
                i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
                f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
                                    is_vf, is_netdev);
                if (!f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add filter %d for %pM\n",
                                 I40E_VLAN_ANY, netdev->dev_addr);
                        return -ENOMEM;
                }
        }

        /* and on every other MAC known to the VSI */
        if (!filter_count) {
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                            is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter %d for %pM\n",
                                         I40E_VLAN_ANY, f->macaddr);
                                return -ENOMEM;
                        }
                }
        }

        /* skip the HW sync while the device is down or resetting */
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;

        return i40e_sync_vsi_filters(vsi);
}
2200
2201 /**
2202  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2203  * @netdev: network interface to be adjusted
2204  * @vid: vlan id to be added
2205  *
2206  * net_device_ops implementation for adding vlan ids
2207  **/
2208 #ifdef I40E_FCOE
2209 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2210                          __always_unused __be16 proto, u16 vid)
2211 #else
2212 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2213                                 __always_unused __be16 proto, u16 vid)
2214 #endif
2215 {
2216         struct i40e_netdev_priv *np = netdev_priv(netdev);
2217         struct i40e_vsi *vsi = np->vsi;
2218         int ret = 0;
2219
2220         if (vid > 4095)
2221                 return -EINVAL;
2222
2223         netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2224
2225         /* If the network stack called us with vid = 0 then
2226          * it is asking to receive priority tagged packets with
2227          * vlan id 0.  Our HW receives them by default when configured
2228          * to receive untagged packets so there is no need to add an
2229          * extra filter for vlan 0 tagged packets.
2230          */
2231         if (vid)
2232                 ret = i40e_vsi_add_vlan(vsi, vid);
2233
2234         if (!ret && (vid < VLAN_N_VID))
2235                 set_bit(vid, vsi->active_vlans);
2236
2237         return ret;
2238 }
2239
2240 /**
2241  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2242  * @netdev: network interface to be adjusted
2243  * @vid: vlan id to be removed
2244  *
2245  * net_device_ops implementation for removing vlan ids
2246  **/
2247 #ifdef I40E_FCOE
2248 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2249                           __always_unused __be16 proto, u16 vid)
2250 #else
2251 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2252                                  __always_unused __be16 proto, u16 vid)
2253 #endif
2254 {
2255         struct i40e_netdev_priv *np = netdev_priv(netdev);
2256         struct i40e_vsi *vsi = np->vsi;
2257
2258         netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2259
2260         /* return code is ignored as there is nothing a user
2261          * can do about failure to remove and a log message was
2262          * already printed from the other function
2263          */
2264         i40e_vsi_kill_vlan(vsi, vid);
2265
2266         clear_bit(vid, vsi->active_vlans);
2267
2268         return 0;
2269 }
2270
2271 /**
2272  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2273  * @vsi: the vsi being brought back up
2274  **/
2275 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2276 {
2277         u16 vid;
2278
2279         if (!vsi->netdev)
2280                 return;
2281
2282         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2283
2284         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2285                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2286                                      vid);
2287 }
2288
2289 /**
2290  * i40e_vsi_add_pvid - Add pvid for the VSI
2291  * @vsi: the vsi being adjusted
2292  * @vid: the vlan id to set as a PVID
2293  **/
2294 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2295 {
2296         struct i40e_vsi_context ctxt;
2297         i40e_status aq_ret;
2298
2299         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2300         vsi->info.pvid = cpu_to_le16(vid);
2301         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2302                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2303                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2304
2305         ctxt.seid = vsi->seid;
2306         ctxt.info = vsi->info;
2307         aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2308         if (aq_ret) {
2309                 dev_info(&vsi->back->pdev->dev,
2310                          "%s: update vsi failed, aq_err=%d\n",
2311                          __func__, vsi->back->hw.aq.asq_last_status);
2312                 return -ENOENT;
2313         }
2314
2315         return 0;
2316 }
2317
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	/* turn off VLAN stripping, then clear the cached port VLAN.
	 * NOTE(review): only the software copy in vsi->info is cleared
	 * here; presumably a later VSI update pushes it to HW — confirm.
	 */
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
2330
2331 /**
2332  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2333  * @vsi: ptr to the VSI
2334  *
2335  * If this function returns with an error, then it's possible one or
2336  * more of the rings is populated (while the rest are not).  It is the
2337  * callers duty to clean those orphaned rings.
2338  *
2339  * Return 0 on success, negative on failure
2340  **/
2341 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2342 {
2343         int i, err = 0;
2344
2345         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2346                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2347
2348         return err;
2349 }
2350
2351 /**
2352  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2353  * @vsi: ptr to the VSI
2354  *
2355  * Free VSI's transmit software resources
2356  **/
2357 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2358 {
2359         int i;
2360
2361         if (!vsi->tx_rings)
2362                 return;
2363
2364         for (i = 0; i < vsi->num_queue_pairs; i++)
2365                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2366                         i40e_free_tx_resources(vsi->tx_rings[i]);
2367 }
2368
2369 /**
2370  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2371  * @vsi: ptr to the VSI
2372  *
2373  * If this function returns with an error, then it's possible one or
2374  * more of the rings is populated (while the rest are not).  It is the
2375  * callers duty to clean those orphaned rings.
2376  *
2377  * Return 0 on success, negative on failure
2378  **/
2379 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2380 {
2381         int i, err = 0;
2382
2383         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2384                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2385 #ifdef I40E_FCOE
2386         i40e_fcoe_setup_ddp_resources(vsi);
2387 #endif
2388         return err;
2389 }
2390
2391 /**
2392  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2393  * @vsi: ptr to the VSI
2394  *
2395  * Free all receive software resources
2396  **/
2397 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2398 {
2399         int i;
2400
2401         if (!vsi->rx_rings)
2402                 return;
2403
2404         for (i = 0; i < vsi->num_queue_pairs; i++)
2405                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2406                         i40e_free_rx_resources(vsi->rx_rings[i]);
2407 #ifdef I40E_FCOE
2408         i40e_fcoe_free_ddp_resources(vsi);
2409 #endif
2410 }
2411
2412 /**
2413  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2414  * @ring: The Tx ring to configure
2415  *
2416  * This enables/disables XPS for a given Tx descriptor ring
2417  * based on the TCs enabled for the VSI that ring belongs to.
2418  **/
2419 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2420 {
2421         struct i40e_vsi *vsi = ring->vsi;
2422         cpumask_var_t mask;
2423
2424         if (!ring->q_vector || !ring->netdev)
2425                 return;
2426
2427         /* Single TC mode enable XPS */
2428         if (vsi->tc_config.numtc <= 1) {
2429                 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2430                         netif_set_xps_queue(ring->netdev,
2431                                             &ring->q_vector->affinity_mask,
2432                                             ring->queue_index);
2433         } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2434                 /* Disable XPS to allow selection based on TC */
2435                 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2436                 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2437                 free_cpumask_var(mask);
2438         }
2439 }
2440
2441 /**
2442  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2443  * @ring: The Tx ring to configure
2444  *
2445  * Configure the Tx descriptor ring in the HMC context.
2446  **/
2447 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2448 {
2449         struct i40e_vsi *vsi = ring->vsi;
2450         u16 pf_q = vsi->base_queue + ring->queue_index;
2451         struct i40e_hw *hw = &vsi->back->hw;
2452         struct i40e_hmc_obj_txq tx_ctx;
2453         i40e_status err = 0;
2454         u32 qtx_ctl = 0;
2455
2456         /* some ATR related tx ring init */
2457         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2458                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2459                 ring->atr_count = 0;
2460         } else {
2461                 ring->atr_sample_rate = 0;
2462         }
2463
2464         /* configure XPS */
2465         i40e_config_xps_tx_ring(ring);
2466
2467         /* clear the context structure first */
2468         memset(&tx_ctx, 0, sizeof(tx_ctx));
2469
2470         tx_ctx.new_context = 1;
2471         tx_ctx.base = (ring->dma / 128);
2472         tx_ctx.qlen = ring->count;
2473         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2474                                                I40E_FLAG_FD_ATR_ENABLED));
2475 #ifdef I40E_FCOE
2476         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2477 #endif
2478         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2479         /* FDIR VSI tx ring can still use RS bit and writebacks */
2480         if (vsi->type != I40E_VSI_FDIR)
2481                 tx_ctx.head_wb_ena = 1;
2482         tx_ctx.head_wb_addr = ring->dma +
2483                               (ring->count * sizeof(struct i40e_tx_desc));
2484
2485         /* As part of VSI creation/update, FW allocates certain
2486          * Tx arbitration queue sets for each TC enabled for
2487          * the VSI. The FW returns the handles to these queue
2488          * sets as part of the response buffer to Add VSI,
2489          * Update VSI, etc. AQ commands. It is expected that
2490          * these queue set handles be associated with the Tx
2491          * queues by the driver as part of the TX queue context
2492          * initialization. This has to be done regardless of
2493          * DCB as by default everything is mapped to TC0.
2494          */
2495         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2496         tx_ctx.rdylist_act = 0;
2497
2498         /* clear the context in the HMC */
2499         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2500         if (err) {
2501                 dev_info(&vsi->back->pdev->dev,
2502                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2503                          ring->queue_index, pf_q, err);
2504                 return -ENOMEM;
2505         }
2506
2507         /* set the context in the HMC */
2508         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2509         if (err) {
2510                 dev_info(&vsi->back->pdev->dev,
2511                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2512                          ring->queue_index, pf_q, err);
2513                 return -ENOMEM;
2514         }
2515
2516         /* Now associate this queue with this PCI function */
2517         if (vsi->type == I40E_VSI_VMDQ2) {
2518                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2519                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2520                            I40E_QTX_CTL_VFVM_INDX_MASK;
2521         } else {
2522                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2523         }
2524
2525         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2526                     I40E_QTX_CTL_PF_INDX_MASK);
2527         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2528         i40e_flush(hw);
2529
2530         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2531
2532         /* cache tail off for easier writes later */
2533         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2534
2535         return 0;
2536 }
2537
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 *
 * Returns 0 on success, -ENOMEM if the HMC queue context could not be
 * cleared or written.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pull buffer sizing down from the VSI-level decision made in
	 * i40e_vsi_configure_rx()
	 */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* descriptor size: 0 = 16-byte, 1 = 32-byte */
	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* a non-zero dtype means header split is in use */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* cap the max receive size by what the buffer chain can hold */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	/* NOTE(review): revision 0 silicon apparently needs lrxqthresh=0;
	 * later revisions use 2 — rationale not visible here, confirm
	 * against the datasheet
	 */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* pre-fill the ring with receive buffers */
	if (ring_is_ps_enabled(ring)) {
		i40e_alloc_rx_headers(ring);
		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
	} else {
		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
	}

	return 0;
}
2631
2632 /**
2633  * i40e_vsi_configure_tx - Configure the VSI for Tx
2634  * @vsi: VSI structure describing this set of rings and resources
2635  *
2636  * Configure the Tx VSI for operation.
2637  **/
2638 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2639 {
2640         int err = 0;
2641         u16 i;
2642
2643         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2644                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2645
2646         return err;
2647 }
2648
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 *
 * Decides the VSI-wide receive buffer layout (no-split, header split, or
 * split-always) from the PF flags, then programs every Rx ring with it.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* max frame: MTU plus L2 framing overhead, or the 2K default */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		/* neither or both flags set: split-always mode */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
2709
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 *
 * Tags every Rx/Tx ring with the traffic class it belongs to, based on
 * the VSI's enabled-TC bitmap and per-TC queue offset/count table.
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
	}
	/* NOTE(review): there is no early return above, so when DCB is
	 * disabled the per-TC loop below still runs; presumably
	 * enabled_tc only has TC0 set in that case so the result is the
	 * same — confirm against where tc_config is populated.
	 */

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & (1 << n)))
			continue;

		/* each enabled TC owns a contiguous range of queue pairs */
		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
2744
2745 /**
2746  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2747  * @vsi: ptr to the VSI
2748  **/
2749 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2750 {
2751         if (vsi->netdev)
2752                 i40e_set_rx_mode(vsi->netdev);
2753 }
2754
2755 /**
2756  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2757  * @vsi: Pointer to the targeted VSI
2758  *
2759  * This function replays the hlist on the hw where all the SB Flow Director
2760  * filters were saved.
2761  **/
2762 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2763 {
2764         struct i40e_fdir_filter *filter;
2765         struct i40e_pf *pf = vsi->back;
2766         struct hlist_node *node;
2767
2768         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2769                 return;
2770
2771         hlist_for_each_entry_safe(filter, node,
2772                                   &pf->fdir_filter_list, fdir_node) {
2773                 i40e_add_del_fdir(vsi, filter, true);
2774         }
2775 }
2776
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies rx mode, vlans and DCB ring tagging, then programs the Tx and
 * Rx rings.  Returns the first ring-configuration error, if any.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	err = i40e_vsi_configure_tx(vsi);
	if (err)
		return err;

	return i40e_vsi_configure_rx(vsi);
}
2794
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector ITR registers and builds, in HW, the linked
 * list of queue pairs serviced by each MSI-X vector: Rx queue -> Tx
 * queue of the same pair -> Rx queue of the next pair, terminated on
 * the vector's last Tx queue.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		/* program this vector's Rx and Tx interrupt throttling */
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* Rx entry: next element is the Tx queue of the
			 * same queue pair (index qp)
			 */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx entry: chain on to the Rx queue of the next
			 * queue pair (qp + 1)
			 */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
2857
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Enables the miscellaneous (non queue-pair) interrupt causes reported
 * through ICR0: ECC, malicious-driver detect, global reset, PCI
 * exception, GPIO, HMC error, VF reset, admin queue, and — when PTP is
 * in use — timesync.
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* causes that are always wanted */
	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* timesync cause only when PTP is enabled */
	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
2892
2893 /**
2894  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2895  * @vsi: the VSI being configured
2896  **/
2897 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2898 {
2899         struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2900         struct i40e_pf *pf = vsi->back;
2901         struct i40e_hw *hw = &pf->hw;
2902         u32 val;
2903
2904         /* set the ITR configuration */
2905         q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2906         q_vector->rx.latency_range = I40E_LOW_LATENCY;
2907         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2908         q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2909         q_vector->tx.latency_range = I40E_LOW_LATENCY;
2910         wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2911
2912         i40e_enable_misc_int_causes(pf);
2913
2914         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2915         wr32(hw, I40E_PFINT_LNKLST0, 0);
2916
2917         /* Associate the queue pair to the vector and enable the queue int */
2918         val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
2919               (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2920               (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2921
2922         wr32(hw, I40E_QINT_RQCTL(0), val);
2923
2924         val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
2925               (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2926               (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2927
2928         wr32(hw, I40E_QINT_TQCTL(0), val);
2929         i40e_flush(hw);
2930 }
2931
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	/* write DYN_CTL0 with INTENA clear to mask vector 0.
	 * NOTE(review): the ITR_INDX shift macro used here is the CTLN
	 * variant, not CTL0 — presumably the shift values are identical;
	 * confirm against the register definitions.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
2944
2945 /**
2946  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2947  * @pf: board private structure
2948  **/
2949 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2950 {
2951         struct i40e_hw *hw = &pf->hw;
2952         u32 val;
2953
2954         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2955               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2956               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2957
2958         wr32(hw, I40E_PFINT_DYN_CTL0, val);
2959         i40e_flush(hw);
2960 }
2961
2962 /**
2963  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2964  * @vsi: pointer to a vsi
2965  * @vector: enable a particular Hw Interrupt vector
2966  **/
2967 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2968 {
2969         struct i40e_pf *pf = vsi->back;
2970         struct i40e_hw *hw = &pf->hw;
2971         u32 val;
2972
2973         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2974               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2975               (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2976         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2977         /* skip the flush */
2978 }
2979
2980 /**
2981  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2982  * @vsi: pointer to a vsi
2983  * @vector: disable a particular Hw Interrupt vector
2984  **/
2985 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2986 {
2987         struct i40e_pf *pf = vsi->back;
2988         struct i40e_hw *hw = &pf->hw;
2989         u32 val;
2990
2991         val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2992         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2993         i40e_flush(hw);
2994 }
2995
2996 /**
2997  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2998  * @irq: interrupt number
2999  * @data: pointer to a q_vector
3000  **/
3001 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3002 {
3003         struct i40e_q_vector *q_vector = data;
3004
3005         if (!q_vector->tx.ring && !q_vector->rx.ring)
3006                 return IRQ_HANDLED;
3007
3008         napi_schedule(&q_vector->napi);
3009
3010         return IRQ_HANDLED;
3011 }
3012
3013 /**
3014  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3015  * @vsi: the VSI being configured
3016  * @basename: name for the vector
3017  *
3018  * Allocates MSI-X vectors and requests interrupts from the kernel.
3019  **/
3020 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3021 {
3022         int q_vectors = vsi->num_q_vectors;
3023         struct i40e_pf *pf = vsi->back;
3024         int base = vsi->base_vector;
3025         int rx_int_idx = 0;
3026         int tx_int_idx = 0;
3027         int vector, err;
3028
3029         for (vector = 0; vector < q_vectors; vector++) {
3030                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3031
3032                 if (q_vector->tx.ring && q_vector->rx.ring) {
3033                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3034                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3035                         tx_int_idx++;
3036                 } else if (q_vector->rx.ring) {
3037                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3038                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3039                 } else if (q_vector->tx.ring) {
3040                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3041                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3042                 } else {
3043                         /* skip this unused q_vector */
3044                         continue;
3045                 }
3046                 err = request_irq(pf->msix_entries[base + vector].vector,
3047                                   vsi->irq_handler,
3048                                   0,
3049                                   q_vector->name,
3050                                   q_vector);
3051                 if (err) {
3052                         dev_info(&pf->pdev->dev,
3053                                  "%s: request_irq failed, error: %d\n",
3054                                  __func__, err);
3055                         goto free_queue_irqs;
3056                 }
3057                 /* assign the mask for this irq */
3058                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3059                                       &q_vector->affinity_mask);
3060         }
3061
3062         vsi->irqs_ready = true;
3063         return 0;
3064
3065 free_queue_irqs:
3066         while (vector) {
3067                 vector--;
3068                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3069                                       NULL);
3070                 free_irq(pf->msix_entries[base + vector].vector,
3071                          &(vsi->q_vectors[vector]));
3072         }
3073         return err;
3074 }
3075
3076 /**
3077  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3078  * @vsi: the VSI being un-configured
3079  **/
3080 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3081 {
3082         struct i40e_pf *pf = vsi->back;
3083         struct i40e_hw *hw = &pf->hw;
3084         int base = vsi->base_vector;
3085         int i;
3086
3087         for (i = 0; i < vsi->num_queue_pairs; i++) {
3088                 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3089                 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3090         }
3091
3092         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3093                 for (i = vsi->base_vector;
3094                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
3095                         wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3096
3097                 i40e_flush(hw);
3098                 for (i = 0; i < vsi->num_q_vectors; i++)
3099                         synchronize_irq(pf->msix_entries[i + base].vector);
3100         } else {
3101                 /* Legacy and MSI mode - this stops all interrupt handling */
3102                 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3103                 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3104                 i40e_flush(hw);
3105                 synchronize_irq(pf->pdev->irq);
3106         }
3107 }
3108
3109 /**
3110  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3111  * @vsi: the VSI being configured
3112  **/
3113 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3114 {
3115         struct i40e_pf *pf = vsi->back;
3116         int i;
3117
3118         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3119                 for (i = vsi->base_vector;
3120                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
3121                         i40e_irq_dynamic_enable(vsi, i);
3122         } else {
3123                 i40e_irq_dynamic_enable_icr0(pf);
3124         }
3125
3126         i40e_flush(&pf->hw);
3127         return 0;
3128 }
3129
3130 /**
3131  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3132  * @pf: board private structure
3133  **/
3134 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3135 {
3136         /* Disable ICR 0 */
3137         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3138         i40e_flush(&pf->hw);
3139 }
3140
3141 /**
3142  * i40e_intr - MSI/Legacy and non-queue interrupt handler
3143  * @irq: interrupt number
3144  * @data: pointer to a q_vector
3145  *
3146  * This is the handler used for all MSI/Legacy interrupts, and deals
3147  * with both queue and non-queue interrupts.  This is also used in
3148  * MSIX mode to handle the non-queue interrupts.
3149  **/
3150 static irqreturn_t i40e_intr(int irq, void *data)
3151 {
3152         struct i40e_pf *pf = (struct i40e_pf *)data;
3153         struct i40e_hw *hw = &pf->hw;
3154         irqreturn_t ret = IRQ_NONE;
3155         u32 icr0, icr0_remaining;
3156         u32 val, ena_mask;
3157
3158         icr0 = rd32(hw, I40E_PFINT_ICR0);
3159         ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3160
3161         /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3162         if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3163                 goto enable_intr;
3164
3165         /* if interrupt but no bits showing, must be SWINT */
3166         if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3167             (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3168                 pf->sw_int_count++;
3169
3170         /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3171         if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3172
3173                 /* temporarily disable queue cause for NAPI processing */
3174                 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3175                 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3176                 wr32(hw, I40E_QINT_RQCTL(0), qval);
3177
3178                 qval = rd32(hw, I40E_QINT_TQCTL(0));
3179                 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3180                 wr32(hw, I40E_QINT_TQCTL(0), qval);
3181
3182                 if (!test_bit(__I40E_DOWN, &pf->state))
3183                         napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3184         }
3185
3186         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3187                 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3188                 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3189         }
3190
3191         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3192                 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3193                 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3194         }
3195
3196         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3197                 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3198                 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3199         }
3200
3201         if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3202                 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3203                         set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3204                 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3205                 val = rd32(hw, I40E_GLGEN_RSTAT);
3206                 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3207                        >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3208                 if (val == I40E_RESET_CORER) {
3209                         pf->corer_count++;
3210                 } else if (val == I40E_RESET_GLOBR) {
3211                         pf->globr_count++;
3212                 } else if (val == I40E_RESET_EMPR) {
3213                         pf->empr_count++;
3214                         set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3215                 }
3216         }
3217
3218         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3219                 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3220                 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3221                 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3222                          rd32(hw, I40E_PFHMC_ERRORINFO),
3223                          rd32(hw, I40E_PFHMC_ERRORDATA));
3224         }
3225
3226         if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3227                 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3228
3229                 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3230                         icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3231                         i40e_ptp_tx_hwtstamp(pf);
3232                 }
3233         }
3234
3235         /* If a critical error is pending we have no choice but to reset the
3236          * device.
3237          * Report and mask out any remaining unexpected interrupts.
3238          */
3239         icr0_remaining = icr0 & ena_mask;
3240         if (icr0_remaining) {
3241                 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3242                          icr0_remaining);
3243                 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3244                     (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3245                     (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3246                         dev_info(&pf->pdev->dev, "device will be reset\n");
3247                         set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3248                         i40e_service_event_schedule(pf);
3249                 }
3250                 ena_mask &= ~icr0_remaining;
3251         }
3252         ret = IRQ_HANDLED;
3253
3254 enable_intr:
3255         /* re-enable interrupt causes */
3256         wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3257         if (!test_bit(__I40E_DOWN, &pf->state)) {
3258                 i40e_service_event_schedule(pf);
3259                 i40e_irq_dynamic_enable_icr0(pf);
3260         }
3261
3262         return ret;
3263 }
3264
3265 /**
3266  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3267  * @tx_ring:  tx ring to clean
3268  * @budget:   how many cleans we're allowed
3269  *
3270  * Returns true if there's any budget left (e.g. the clean is finished)
3271  **/
3272 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3273 {
3274         struct i40e_vsi *vsi = tx_ring->vsi;
3275         u16 i = tx_ring->next_to_clean;
3276         struct i40e_tx_buffer *tx_buf;
3277         struct i40e_tx_desc *tx_desc;
3278
3279         tx_buf = &tx_ring->tx_bi[i];
3280         tx_desc = I40E_TX_DESC(tx_ring, i);
3281         i -= tx_ring->count;
3282
3283         do {
3284                 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3285
3286                 /* if next_to_watch is not set then there is no work pending */
3287                 if (!eop_desc)
3288                         break;
3289
3290                 /* prevent any other reads prior to eop_desc */
3291                 read_barrier_depends();
3292
3293                 /* if the descriptor isn't done, no work yet to do */
3294                 if (!(eop_desc->cmd_type_offset_bsz &
3295                       cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3296                         break;
3297
3298                 /* clear next_to_watch to prevent false hangs */
3299                 tx_buf->next_to_watch = NULL;
3300
3301                 tx_desc->buffer_addr = 0;
3302                 tx_desc->cmd_type_offset_bsz = 0;
3303                 /* move past filter desc */
3304                 tx_buf++;
3305                 tx_desc++;
3306                 i++;
3307                 if (unlikely(!i)) {
3308                         i -= tx_ring->count;
3309                         tx_buf = tx_ring->tx_bi;
3310                         tx_desc = I40E_TX_DESC(tx_ring, 0);
3311                 }
3312                 /* unmap skb header data */
3313                 dma_unmap_single(tx_ring->dev,
3314                                  dma_unmap_addr(tx_buf, dma),
3315                                  dma_unmap_len(tx_buf, len),
3316                                  DMA_TO_DEVICE);
3317                 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3318                         kfree(tx_buf->raw_buf);
3319
3320                 tx_buf->raw_buf = NULL;
3321                 tx_buf->tx_flags = 0;
3322                 tx_buf->next_to_watch = NULL;
3323                 dma_unmap_len_set(tx_buf, len, 0);
3324                 tx_desc->buffer_addr = 0;
3325                 tx_desc->cmd_type_offset_bsz = 0;
3326
3327                 /* move us past the eop_desc for start of next FD desc */
3328                 tx_buf++;
3329                 tx_desc++;
3330                 i++;
3331                 if (unlikely(!i)) {
3332                         i -= tx_ring->count;
3333                         tx_buf = tx_ring->tx_bi;
3334                         tx_desc = I40E_TX_DESC(tx_ring, 0);
3335                 }
3336
3337                 /* update budget accounting */
3338                 budget--;
3339         } while (likely(budget));
3340
3341         i += tx_ring->count;
3342         tx_ring->next_to_clean = i;
3343
3344         if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3345                 i40e_irq_dynamic_enable(vsi,
3346                                 tx_ring->q_vector->v_idx + vsi->base_vector);
3347         }
3348         return budget > 0;
3349 }
3350
3351 /**
3352  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3353  * @irq: interrupt number
3354  * @data: pointer to a q_vector
3355  **/
3356 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3357 {
3358         struct i40e_q_vector *q_vector = data;
3359         struct i40e_vsi *vsi;
3360
3361         if (!q_vector->tx.ring)
3362                 return IRQ_HANDLED;
3363
3364         vsi = q_vector->tx.ring->vsi;
3365         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3366
3367         return IRQ_HANDLED;
3368 }
3369
3370 /**
3371  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3372  * @vsi: the VSI being configured
3373  * @v_idx: vector index
3374  * @qp_idx: queue pair index
3375  **/
3376 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3377 {
3378         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3379         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3380         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3381
3382         tx_ring->q_vector = q_vector;
3383         tx_ring->next = q_vector->tx.ring;
3384         q_vector->tx.ring = tx_ring;
3385         q_vector->tx.count++;
3386
3387         rx_ring->q_vector = q_vector;
3388         rx_ring->next = q_vector->rx.ring;
3389         q_vector->rx.ring = rx_ring;
3390         q_vector->rx.count++;
3391 }
3392
3393 /**
3394  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3395  * @vsi: the VSI being configured
3396  *
3397  * This function maps descriptor rings to the queue-specific vectors
3398  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3399  * one vector per queue pair, but on a constrained vector budget, we
3400  * group the queue pairs as "efficiently" as possible.
3401  **/
3402 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3403 {
3404         int qp_remaining = vsi->num_queue_pairs;
3405         int q_vectors = vsi->num_q_vectors;
3406         int num_ringpairs;
3407         int v_start = 0;
3408         int qp_idx = 0;
3409
3410         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3411          * group them so there are multiple queues per vector.
3412          * It is also important to go through all the vectors available to be
3413          * sure that if we don't use all the vectors, that the remaining vectors
3414          * are cleared. This is especially important when decreasing the
3415          * number of queues in use.
3416          */
3417         for (; v_start < q_vectors; v_start++) {
3418                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3419
3420                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3421
3422                 q_vector->num_ringpairs = num_ringpairs;
3423
3424                 q_vector->rx.count = 0;
3425                 q_vector->tx.count = 0;
3426                 q_vector->rx.ring = NULL;
3427                 q_vector->tx.ring = NULL;
3428
3429                 while (num_ringpairs--) {
3430                         map_vector_to_qp(vsi, v_start, qp_idx);
3431                         qp_idx++;
3432                         qp_remaining--;
3433                 }
3434         }
3435 }
3436
3437 /**
3438  * i40e_vsi_request_irq - Request IRQ from the OS
3439  * @vsi: the VSI being configured
3440  * @basename: name for the vector
3441  **/
3442 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3443 {
3444         struct i40e_pf *pf = vsi->back;
3445         int err;
3446
3447         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3448                 err = i40e_vsi_request_irq_msix(vsi, basename);
3449         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3450                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3451                                   pf->int_name, pf);
3452         else
3453                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3454                                   pf->int_name, pf);
3455
3456         if (err)
3457                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3458
3459         return err;
3460 }
3461
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int i;

        /* if interface is down do nothing */
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;

        pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                /* i40e_intr() casts its data argument to struct i40e_pf *
                 * (the legacy/MSI handler is registered with the PF as
                 * dev_id in i40e_vsi_request_irq), so the PF must be
                 * passed here, not the netdev.
                 */
                i40e_intr(pf->pdev->irq, pf);
        }
        pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif
3495
3496 /**
3497  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3498  * @pf: the PF being configured
3499  * @pf_q: the PF queue
3500  * @enable: enable or disable state of the queue
3501  *
3502  * This routine will wait for the given Tx queue of the PF to reach the
3503  * enabled or disabled state.
3504  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3505  * multiple retries; else will return 0 in case of success.
3506  **/
3507 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3508 {
3509         int i;
3510         u32 tx_reg;
3511
3512         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3513                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3514                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3515                         break;
3516
3517                 usleep_range(10, 20);
3518         }
3519         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3520                 return -ETIMEDOUT;
3521
3522         return 0;
3523 }
3524
3525 /**
3526  * i40e_vsi_control_tx - Start or stop a VSI's rings
3527  * @vsi: the VSI being configured
3528  * @enable: start or stop the rings
3529  **/
3530 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3531 {
3532         struct i40e_pf *pf = vsi->back;
3533         struct i40e_hw *hw = &pf->hw;
3534         int i, j, pf_q, ret = 0;
3535         u32 tx_reg;
3536
3537         pf_q = vsi->base_queue;
3538         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3539
3540                 /* warn the TX unit of coming changes */
3541                 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3542                 if (!enable)
3543                         usleep_range(10, 20);
3544
3545                 for (j = 0; j < 50; j++) {
3546                         tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3547                         if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3548                             ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3549                                 break;
3550                         usleep_range(1000, 2000);
3551                 }
3552                 /* Skip if the queue is already in the requested state */
3553                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3554                         continue;
3555
3556                 /* turn on/off the queue */
3557                 if (enable) {
3558                         wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3559                         tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3560                 } else {
3561                         tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3562                 }
3563
3564                 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3565                 /* No waiting for the Tx queue to disable */
3566                 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3567                         continue;
3568
3569                 /* wait for the change to finish */
3570                 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3571                 if (ret) {
3572                         dev_info(&pf->pdev->dev,
3573                                  "%s: VSI seid %d Tx ring %d %sable timeout\n",
3574                                  __func__, vsi->seid, pf_q,
3575                                  (enable ? "en" : "dis"));
3576                         break;
3577                 }
3578         }
3579
3580         if (hw->revision_id == 0)
3581                 mdelay(50);
3582         return ret;
3583 }
3584
3585 /**
3586  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3587  * @pf: the PF being configured
3588  * @pf_q: the PF queue
3589  * @enable: enable or disable state of the queue
3590  *
3591  * This routine will wait for the given Rx queue of the PF to reach the
3592  * enabled or disabled state.
3593  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3594  * multiple retries; else will return 0 in case of success.
3595  **/
3596 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3597 {
3598         int i;
3599         u32 rx_reg;
3600
3601         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3602                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3603                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3604                         break;
3605
3606                 usleep_range(10, 20);
3607         }
3608         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3609                 return -ETIMEDOUT;
3610
3611         return 0;
3612 }
3613
3614 /**
3615  * i40e_vsi_control_rx - Start or stop a VSI's rings
3616  * @vsi: the VSI being configured
3617  * @enable: start or stop the rings
3618  **/
3619 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3620 {
3621         struct i40e_pf *pf = vsi->back;
3622         struct i40e_hw *hw = &pf->hw;
3623         int i, j, pf_q, ret = 0;
3624         u32 rx_reg;
3625
3626         pf_q = vsi->base_queue;
3627         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3628                 for (j = 0; j < 50; j++) {
3629                         rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3630                         if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3631                             ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3632                                 break;
3633                         usleep_range(1000, 2000);
3634                 }
3635
3636                 /* Skip if the queue is already in the requested state */
3637                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3638                         continue;
3639
3640                 /* turn on/off the queue */
3641                 if (enable)
3642                         rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3643                 else
3644                         rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3645                 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3646
3647                 /* wait for the change to finish */
3648                 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3649                 if (ret) {
3650                         dev_info(&pf->pdev->dev,
3651                                  "%s: VSI seid %d Rx ring %d %sable timeout\n",
3652                                  __func__, vsi->seid, pf_q,
3653                                  (enable ? "en" : "dis"));
3654                         break;
3655                 }
3656         }
3657
3658         return ret;
3659 }
3660
3661 /**
3662  * i40e_vsi_control_rings - Start or stop a VSI's rings
3663  * @vsi: the VSI being configured
3664  * @enable: start or stop the rings
3665  **/
3666 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3667 {
3668         int ret = 0;
3669
3670         /* do rx first for enable and last for disable */
3671         if (request) {
3672                 ret = i40e_vsi_control_rx(vsi, request);
3673                 if (ret)
3674                         return ret;
3675                 ret = i40e_vsi_control_tx(vsi, request);
3676         } else {
3677                 /* Ignore return value, we need to shutdown whatever we can */
3678                 i40e_vsi_control_tx(vsi, request);
3679                 i40e_vsi_control_rx(vsi, request);
3680         }
3681
3682         return ret;
3683 }
3684
3685 /**
3686  * i40e_vsi_free_irq - Free the irq association with the OS
3687  * @vsi: the VSI being configured
3688  **/
3689 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3690 {
3691         struct i40e_pf *pf = vsi->back;
3692         struct i40e_hw *hw = &pf->hw;
3693         int base = vsi->base_vector;
3694         u32 val, qp;
3695         int i;
3696
3697         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3698                 if (!vsi->q_vectors)
3699                         return;
3700
3701                 if (!vsi->irqs_ready)
3702                         return;
3703
3704                 vsi->irqs_ready = false;
3705                 for (i = 0; i < vsi->num_q_vectors; i++) {
3706                         u16 vector = i + base;
3707
3708                         /* free only the irqs that were actually requested */
3709                         if (!vsi->q_vectors[i] ||
3710                             !vsi->q_vectors[i]->num_ringpairs)
3711                                 continue;
3712
3713                         /* clear the affinity_mask in the IRQ descriptor */
3714                         irq_set_affinity_hint(pf->msix_entries[vector].vector,
3715                                               NULL);
3716                         free_irq(pf->msix_entries[vector].vector,
3717                                  vsi->q_vectors[i]);
3718
3719                         /* Tear down the interrupt queue link list
3720                          *
3721                          * We know that they come in pairs and always
3722                          * the Rx first, then the Tx.  To clear the
3723                          * link list, stick the EOL value into the
3724                          * next_q field of the registers.
3725                          */
3726                         val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3727                         qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3728                                 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3729                         val |= I40E_QUEUE_END_OF_LIST
3730                                 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3731                         wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3732
3733                         while (qp != I40E_QUEUE_END_OF_LIST) {
3734                                 u32 next;
3735
3736                                 val = rd32(hw, I40E_QINT_RQCTL(qp));
3737
3738                                 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3739                                          I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3740                                          I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3741                                          I40E_QINT_RQCTL_INTEVENT_MASK);
3742
3743                                 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3744                                          I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3745
3746                                 wr32(hw, I40E_QINT_RQCTL(qp), val);
3747
3748                                 val = rd32(hw, I40E_QINT_TQCTL(qp));
3749
3750                                 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3751                                         >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3752
3753                                 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3754                                          I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3755                                          I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3756                                          I40E_QINT_TQCTL_INTEVENT_MASK);
3757
3758                                 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3759                                          I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3760
3761                                 wr32(hw, I40E_QINT_TQCTL(qp), val);
3762                                 qp = next;
3763                         }
3764                 }
3765         } else {
3766                 free_irq(pf->pdev->irq, pf);
3767
3768                 val = rd32(hw, I40E_PFINT_LNKLST0);
3769                 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3770                         >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3771                 val |= I40E_QUEUE_END_OF_LIST
3772                         << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3773                 wr32(hw, I40E_PFINT_LNKLST0, val);
3774
3775                 val = rd32(hw, I40E_QINT_RQCTL(qp));
3776                 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3777                          I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3778                          I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3779                          I40E_QINT_RQCTL_INTEVENT_MASK);
3780
3781                 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3782                         I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3783
3784                 wr32(hw, I40E_QINT_RQCTL(qp), val);
3785
3786                 val = rd32(hw, I40E_QINT_TQCTL(qp));
3787
3788                 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3789                          I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3790                          I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3791                          I40E_QINT_TQCTL_INTEVENT_MASK);
3792
3793                 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3794                         I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3795
3796                 wr32(hw, I40E_QINT_TQCTL(qp), val);
3797         }
3798 }
3799
3800 /**
3801  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3802  * @vsi: the VSI being configured
3803  * @v_idx: Index of vector to be freed
3804  *
3805  * This function frees the memory allocated to the q_vector.  In addition if
3806  * NAPI is enabled it will delete any references to the NAPI struct prior
3807  * to freeing the q_vector.
3808  **/
3809 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3810 {
3811         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3812         struct i40e_ring *ring;
3813
3814         if (!q_vector)
3815                 return;
3816
3817         /* disassociate q_vector from rings */
3818         i40e_for_each_ring(ring, q_vector->tx)
3819                 ring->q_vector = NULL;
3820
3821         i40e_for_each_ring(ring, q_vector->rx)
3822                 ring->q_vector = NULL;
3823
3824         /* only VSI w/ an associated netdev is set up w/ NAPI */
3825         if (vsi->netdev)
3826                 netif_napi_del(&q_vector->napi);
3827
3828         vsi->q_vectors[v_idx] = NULL;
3829
3830         kfree_rcu(q_vector, rcu);
3831 }
3832
3833 /**
3834  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3835  * @vsi: the VSI being un-configured
3836  *
3837  * This frees the memory allocated to the q_vectors and
3838  * deletes references to the NAPI struct.
3839  **/
3840 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3841 {
3842         int v_idx;
3843
3844         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3845                 i40e_free_q_vector(vsi, v_idx);
3846 }
3847
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Hands the MSI-X or MSI resources back to the OS and clears the
 * corresponding capability flags in the PF.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		/* the IRQ tracking pile is only allocated for MSI-X */
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
3866
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	/* quiesce the misc/other-cause vector before tearing anything down */
	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* vector 0 carries the misc/admin-queue interrupt */
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* return every tracked vector to the IRQ pile, then free the
	 * per-VSI q_vectors before releasing the OS-level resources
	 */
	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
3890
3891 /**
3892  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3893  * @vsi: the VSI being configured
3894  **/
3895 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3896 {
3897         int q_idx;
3898
3899         if (!vsi->netdev)
3900                 return;
3901
3902         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3903                 napi_enable(&vsi->q_vectors[q_idx]->napi);
3904 }
3905
3906 /**
3907  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3908  * @vsi: the VSI being configured
3909  **/
3910 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3911 {
3912         int q_idx;
3913
3914         if (!vsi->netdev)
3915                 return;
3916
3917         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3918                 napi_disable(&vsi->q_vectors[q_idx]->napi);
3919 }
3920
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down (unless it is already marked DOWN), releases its
 * IRQ mapping, then frees the Tx and Rx ring resources.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	/* only run i40e_down() once; test_and_set makes this idempotent */
	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
}
3933
3934 /**
3935  * i40e_quiesce_vsi - Pause a given VSI
3936  * @vsi: the VSI being paused
3937  **/
3938 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3939 {
3940         if (test_bit(__I40E_DOWN, &vsi->state))
3941                 return;
3942
3943         /* No need to disable FCoE VSI when Tx suspended */
3944         if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3945             vsi->type == I40E_VSI_FCOE) {
3946                 dev_dbg(&vsi->back->pdev->dev,
3947                         "%s: VSI seid %d skipping FCoE VSI disable\n",
3948                          __func__, vsi->seid);
3949                 return;
3950         }
3951
3952         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3953         if (vsi->netdev && netif_running(vsi->netdev)) {
3954                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3955         } else {
3956                 i40e_vsi_close(vsi);
3957         }
3958 }
3959
3960 /**
3961  * i40e_unquiesce_vsi - Resume a given VSI
3962  * @vsi: the VSI being resumed
3963  **/
3964 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3965 {
3966         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3967                 return;
3968
3969         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3970         if (vsi->netdev && netif_running(vsi->netdev))
3971                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3972         else
3973                 i40e_vsi_open(vsi);   /* this clears the DOWN bit */
3974 }
3975
3976 /**
3977  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3978  * @pf: the PF
3979  **/
3980 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3981 {
3982         int v;
3983
3984         for (v = 0; v < pf->num_alloc_vsi; v++) {
3985                 if (pf->vsi[v])
3986                         i40e_quiesce_vsi(pf->vsi[v]);
3987         }
3988 }
3989
3990 /**
3991  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3992  * @pf: the PF
3993  **/
3994 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3995 {
3996         int v;
3997
3998         for (v = 0; v < pf->num_alloc_vsi; v++) {
3999                 if (pf->vsi[v])
4000                         i40e_unquiesce_vsi(pf->vsi[v]);
4001         }
4002 }
4003
4004 #ifdef CONFIG_I40E_DCB
/**
 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * This function waits for the given VSI's Tx queues to be disabled.
 *
 * Returns 0 on success, or the error from i40e_pf_txq_wait() if any
 * queue fails to reach the disabled state in time.
 **/
static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	/* pf_q is the absolute (PF-relative) queue index */
	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Tx ring %d disable timeout\n",
				 __func__, vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
4030
4031 /**
4032  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4033  * @pf: the PF
4034  *
4035  * This function waits for the Tx queues to be in disabled state for all the
4036  * VSIs that are managed by this PF.
4037  **/
4038 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4039 {
4040         int v, ret = 0;
4041
4042         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4043                 /* No need to wait for FCoE VSI queues */
4044                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4045                         ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4046                         if (ret)
4047                                 break;
4048                 }
4049         }
4050
4051         return ret;
4052 }
4053
4054 #endif
4055 /**
4056  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4057  * @pf: pointer to PF
4058  *
4059  * Get TC map for ISCSI PF type that will include iSCSI TC
4060  * and LAN TC.
4061  **/
4062 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4063 {
4064         struct i40e_dcb_app_priority_table app;
4065         struct i40e_hw *hw = &pf->hw;
4066         u8 enabled_tc = 1; /* TC0 is always enabled */
4067         u8 tc, i;
4068         /* Get the iSCSI APP TLV */
4069         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4070
4071         for (i = 0; i < dcbcfg->numapps; i++) {
4072                 app = dcbcfg->app[i];
4073                 if (app.selector == I40E_APP_SEL_TCPIP &&
4074                     app.protocolid == I40E_APP_PROTOID_ISCSI) {
4075                         tc = dcbcfg->etscfg.prioritytable[app.priority];
4076                         enabled_tc |= (1 << tc);
4077                         break;
4078                 }
4079         }
4080
4081         return enabled_tc;
4082 }
4083
4084 /**
4085  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4086  * @dcbcfg: the corresponding DCBx configuration structure
4087  *
4088  * Return the number of TCs from given DCBx configuration
4089  **/
4090 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4091 {
4092         u8 num_tc = 0;
4093         int i;
4094
4095         /* Scan the ETS Config Priority Table to find
4096          * traffic class enabled for a given priority
4097          * and use the traffic class index to get the
4098          * number of traffic classes enabled
4099          */
4100         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4101                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4102                         num_tc = dcbcfg->etscfg.prioritytable[i];
4103         }
4104
4105         /* Traffic class index starts from zero so
4106          * increment to return the actual count
4107          */
4108         return num_tc + 1;
4109 }
4110
4111 /**
4112  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4113  * @dcbcfg: the corresponding DCBx configuration structure
4114  *
4115  * Query the current DCB configuration and return the number of
4116  * traffic classes enabled from the given DCBX config
4117  **/
4118 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4119 {
4120         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4121         u8 enabled_tc = 1;
4122         u8 i;
4123
4124         for (i = 0; i < num_tc; i++)
4125                 enabled_tc |= 1 << i;
4126
4127         return enabled_tc;
4128 }
4129
4130 /**
4131  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4132  * @pf: PF being queried
4133  *
4134  * Return number of traffic classes enabled for the given PF
4135  **/
4136 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4137 {
4138         struct i40e_hw *hw = &pf->hw;
4139         u8 i, enabled_tc;
4140         u8 num_tc = 0;
4141         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4142
4143         /* If DCB is not enabled then always in single TC */
4144         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4145                 return 1;
4146
4147         /* SFP mode will be enabled for all TCs on port */
4148         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4149                 return i40e_dcb_get_num_tc(dcbcfg);
4150
4151         /* MFP mode return count of enabled TCs for this PF */
4152         if (pf->hw.func_caps.iscsi)
4153                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4154         else
4155                 return 1; /* Only TC0 */
4156
4157         /* At least have TC0 */
4158         enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4159         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4160                 if (enabled_tc & (1 << i))
4161                         num_tc++;
4162         }
4163         return num_tc;
4164 }
4165
4166 /**
4167  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4168  * @pf: PF being queried
4169  *
4170  * Return a bitmap for first enabled traffic class for this PF.
4171  **/
4172 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4173 {
4174         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4175         u8 i = 0;
4176
4177         if (!enabled_tc)
4178                 return 0x1; /* TC0 */
4179
4180         /* Find the first enabled TC */
4181         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4182                 if (enabled_tc & (1 << i))
4183                         break;
4184         }
4185
4186         return 1 << i;
4187 }
4188
4189 /**
4190  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4191  * @pf: PF being queried
4192  *
4193  * Return a bitmap for enabled traffic classes for this PF.
4194  **/
4195 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4196 {
4197         /* If DCB is not enabled for this PF then just return default TC */
4198         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4199                 return i40e_pf_get_default_tc(pf);
4200
4201         /* SFP mode we want PF to be enabled for all TCs */
4202         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4203                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4204
4205         /* MFP enabled and iSCSI PF type */
4206         if (pf->hw.func_caps.iscsi)
4207                 return i40e_get_iscsi_tc_map(pf);
4208         else
4209                 return i40e_pf_get_default_tc(pf);
4210 }
4211
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries the VSI-level bandwidth limit and the per-TC ETS/SLA
 * configuration from firmware via the admin queue, then caches the
 * results in the VSI structure.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Both queries should agree on which TCs are valid; a mismatch is
	 * logged but deliberately not treated as fatal.
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* tc_bw_max packs a nibble per TC across two little-endian words */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4269
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC (relative credits, one per TC)
 *
 * Sends the per-TC relative bandwidth credits to firmware and, on
 * success, caches the queue-set handles firmware assigned to each TC.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status aq_ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
					  NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* remember the qs_handle firmware reported for each TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
4303
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Mirrors the VSI's TC configuration into the netdev: number of TCs,
 * per-TC queue ranges, and the user-priority to TC mapping.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* VSIs without a netdev have nothing to configure */
	if (!netdev)
		return;

	/* an empty TC map means tear down any existing netdev TC state */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
4356
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 *
 * Only the mapping-related fields are copied back; other sections of
 * the returned context are not guaranteed to be valid.
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
4375
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quisced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	/* BW allocation must be set before the queue map is pushed */
	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4448
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 *
 * Returns 0 on success (or when nothing needs to change), negative
 * value on AQ failure.
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}
4496
4497 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesce all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4561
4562 /**
4563  * i40e_resume_port_tx - Resume port Tx
4564  * @pf: PF struct
4565  *
4566  * Resume a port's Tx and issue a PF reset in case of failure to
4567  * resume.
4568  **/
4569 static int i40e_resume_port_tx(struct i40e_pf *pf)
4570 {
4571         struct i40e_hw *hw = &pf->hw;
4572         int ret;
4573
4574         ret = i40e_aq_resume_port_tx(hw, NULL);
4575         if (ret) {
4576                 dev_info(&pf->pdev->dev,
4577                          "AQ command Resume Port Tx failed = %d\n",
4578                           pf->hw.aq.asq_last_status);
4579                 /* Schedule PF reset to recover */
4580                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4581                 i40e_service_event_schedule(pf);
4582         }
4583
4584         return ret;
4585 }
4586
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 *
 * Returns 0 on success (including the early-out for old firmware),
 * or the error from i40e_init_dcb().
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "AQ Querying DCB configuration failed: aq_err %d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return err;
}
4637 #endif /* CONFIG_I40E_DCB */
4638 #define SPEED_SIZE 14
4639 #define FC_SIZE 8
4640 /**
4641  * i40e_print_link_message - print link up or down
4642  * @vsi: the VSI for which link needs a message
4643  */
4644 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4645 {
4646         char speed[SPEED_SIZE] = "Unknown";
4647         char fc[FC_SIZE] = "RX/TX";
4648
4649         if (!isup) {
4650                 netdev_info(vsi->netdev, "NIC Link is Down\n");
4651                 return;
4652         }
4653
4654         /* Warn user if link speed on NPAR enabled partition is not at
4655          * least 10GB
4656          */
4657         if (vsi->back->hw.func_caps.npar_enable &&
4658             (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4659              vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4660                 netdev_warn(vsi->netdev,
4661                             "The partition detected link speed that is less than 10Gbps\n");
4662
4663         switch (vsi->back->hw.phy.link_info.link_speed) {
4664         case I40E_LINK_SPEED_40GB:
4665                 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4666                 break;
4667         case I40E_LINK_SPEED_20GB:
4668                 strncpy(speed, "20 Gbps", SPEED_SIZE);
4669                 break;
4670         case I40E_LINK_SPEED_10GB:
4671                 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4672                 break;
4673         case I40E_LINK_SPEED_1GB:
4674                 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4675                 break;
4676         case I40E_LINK_SPEED_100MB:
4677                 strncpy(speed, "100 Mbps", SPEED_SIZE);
4678                 break;
4679         default:
4680                 break;
4681         }
4682
4683         switch (vsi->back->hw.fc.current_mode) {
4684         case I40E_FC_FULL:
4685                 strlcpy(fc, "RX/TX", FC_SIZE);
4686                 break;
4687         case I40E_FC_TX_PAUSE:
4688                 strlcpy(fc, "TX", FC_SIZE);
4689                 break;
4690         case I40E_FC_RX_PAUSE:
4691                 strlcpy(fc, "RX", FC_SIZE);
4692                 break;
4693         default:
4694                 strlcpy(fc, "None", FC_SIZE);
4695                 break;
4696         }
4697
4698         netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4699                     speed, fc);
4700 }
4701
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Maps interrupts, starts the rings, enables NAPI/IRQs, reports link
 * state, and (for the FDIR VSI) replays sideband filters.
 *
 * Returns 0 on success, negative value if the rings could not be started.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	/* program queue-to-vector mappings for whichever interrupt
	 * scheme this PF negotiated
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	/* mark the VSI live before enabling NAPI and interrupts so the
	 * interrupt path sees an up VSI
	 */
	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		/* link already up: open the Tx queues and carrier */
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			/* sideband TCP/IPv4 rules take precedence over ATR */
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}
	/* kick the service task to pick up any pending work */
	i40e_service_event_schedule(pf);

	return 0;
}
4756
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 *
 * Sleeps while waiting for the __I40E_CONFIG_BUSY bit, so must be
 * called from process context.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	/* this function sleeps below; never call it from IRQ context */
	WARN_ON(in_interrupt());
	/* serialize against any other configuration change on this PF */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
4782
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, or the error from configuring the VSI or
 * completing bring-up.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
4797
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		/* stop the stack from queueing new transmits first */
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	/* quiesce interrupts and rings before stopping NAPI */
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	/* release any buffers still held by the rings */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
4822
4823 /**
4824  * i40e_setup_tc - configure multiple traffic classes
4825  * @netdev: net device to configure
4826  * @tc: number of traffic classes to enable
4827  **/
4828 #ifdef I40E_FCOE
4829 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4830 #else
4831 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4832 #endif
4833 {
4834         struct i40e_netdev_priv *np = netdev_priv(netdev);
4835         struct i40e_vsi *vsi = np->vsi;
4836         struct i40e_pf *pf = vsi->back;
4837         u8 enabled_tc = 0;
4838         int ret = -EINVAL;
4839         int i;
4840
4841         /* Check if DCB enabled to continue */
4842         if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4843                 netdev_info(netdev, "DCB is not enabled for adapter\n");
4844                 goto exit;
4845         }
4846
4847         /* Check if MFP enabled */
4848         if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4849                 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4850                 goto exit;
4851         }
4852
4853         /* Check whether tc count is within enabled limit */
4854         if (tc > i40e_pf_get_num_tc(pf)) {
4855                 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4856                 goto exit;
4857         }
4858
4859         /* Generate TC map for number of tc requested */
4860         for (i = 0; i < tc; i++)
4861                 enabled_tc |= (1 << i);
4862
4863         /* Requesting same TC configuration as already enabled */
4864         if (enabled_tc == vsi->tc_config.enabled_tc)
4865                 return 0;
4866
4867         /* Quiesce VSI queues */
4868         i40e_quiesce_vsi(vsi);
4869
4870         /* Configure VSI for enabled TCs */
4871         ret = i40e_vsi_config_tc(vsi, enabled_tc);
4872         if (ret) {
4873                 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4874                             vsi->seid);
4875                 goto exit;
4876         }
4877
4878         /* Unquiesce VSI */
4879         i40e_unquiesce_vsi(vsi);
4880
4881 exit:
4882         return ret;
4883 }
4884
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings: the TCP_FLAG_*
	 * constants are big-endian, hence the be32_to_cpu() + shift to get
	 * the flag bits into the register format
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	/* ask the stack to replay any known VXLAN offload ports */
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}
4929
4930 /**
4931  * i40e_vsi_open -
4932  * @vsi: the VSI to open
4933  *
4934  * Finish initialization of the VSI.
4935  *
4936  * Returns 0 on success, negative value on failure
4937  **/
4938 int i40e_vsi_open(struct i40e_vsi *vsi)
4939 {
4940         struct i40e_pf *pf = vsi->back;
4941         char int_name[I40E_INT_NAME_STR_LEN];
4942         int err;
4943
4944         /* allocate descriptors */
4945         err = i40e_vsi_setup_tx_resources(vsi);
4946         if (err)
4947                 goto err_setup_tx;
4948         err = i40e_vsi_setup_rx_resources(vsi);
4949         if (err)
4950                 goto err_setup_rx;
4951
4952         err = i40e_vsi_configure(vsi);
4953         if (err)
4954                 goto err_setup_rx;
4955
4956         if (vsi->netdev) {
4957                 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4958                          dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4959                 err = i40e_vsi_request_irq(vsi, int_name);
4960                 if (err)
4961                         goto err_setup_rx;
4962
4963                 /* Notify the stack of the actual queue counts. */
4964                 err = netif_set_real_num_tx_queues(vsi->netdev,
4965                                                    vsi->num_queue_pairs);
4966                 if (err)
4967                         goto err_set_queues;
4968
4969                 err = netif_set_real_num_rx_queues(vsi->netdev,
4970                                                    vsi->num_queue_pairs);
4971                 if (err)
4972                         goto err_set_queues;
4973
4974         } else if (vsi->type == I40E_VSI_FDIR) {
4975                 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
4976                          dev_driver_string(&pf->pdev->dev),
4977                          dev_name(&pf->pdev->dev));
4978                 err = i40e_vsi_request_irq(vsi, int_name);
4979
4980         } else {
4981                 err = -EINVAL;
4982                 goto err_setup_rx;
4983         }
4984
4985         err = i40e_up_complete(vsi);
4986         if (err)
4987                 goto err_up_complete;
4988
4989         return 0;
4990
4991 err_up_complete:
4992         i40e_down(vsi);
4993 err_set_queues:
4994         i40e_vsi_free_irq(vsi);
4995 err_setup_rx:
4996         i40e_vsi_free_rx_resources(vsi);
4997 err_setup_tx:
4998         i40e_vsi_free_tx_resources(vsi);
4999         if (vsi == pf->vsi[pf->lan_vsi])
5000                 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
5001
5002         return err;
5003 }
5004
5005 /**
5006  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5007  * @pf: Pointer to PF
5008  *
5009  * This function destroys the hlist where all the Flow Director
5010  * filters were saved.
5011  **/
5012 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5013 {
5014         struct i40e_fdir_filter *filter;
5015         struct hlist_node *node2;
5016
5017         hlist_for_each_entry_safe(filter, node2,
5018                                   &pf->fdir_filter_list, fdir_node) {
5019                 hlist_del(&filter->fdir_node);
5020                 kfree(filter);
5021         }
5022         pf->fdir_pf_active_filters = 0;
5023 }
5024
5025 /**
5026  * i40e_close - Disables a network interface
5027  * @netdev: network interface device structure
5028  *
5029  * The close entry point is called when an interface is de-activated
5030  * by the OS.  The hardware is still under the driver's control, but
5031  * this netdev interface is disabled.
5032  *
5033  * Returns 0, this is not allowed to fail
5034  **/
5035 #ifdef I40E_FCOE
5036 int i40e_close(struct net_device *netdev)
5037 #else
5038 static int i40e_close(struct net_device *netdev)
5039 #endif
5040 {
5041         struct i40e_netdev_priv *np = netdev_priv(netdev);
5042         struct i40e_vsi *vsi = np->vsi;
5043
5044         i40e_vsi_close(vsi);
5045
5046         return 0;
5047 }
5048
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single biggest reset indicated by @reset_flags is performed;
 * the flags are checked in decreasing order of severity.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	/* the VSI reinit path below can sleep; forbid IRQ context */
	WARN_ON(in_interrupt());

	/* warn the VFs a reset is coming, if the AQ can still carry it */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				/* set __I40E_DOWN first; i40e_down() assumes
				 * its caller has done so
				 */
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
5148
5149 #ifdef CONFIG_I40E_DCB
5150 /**
5151  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5152  * @pf: board private structure
5153  * @old_cfg: current DCB config
5154  * @new_cfg: new DCB config
5155  **/
5156 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5157                             struct i40e_dcbx_config *old_cfg,
5158                             struct i40e_dcbx_config *new_cfg)
5159 {
5160         bool need_reconfig = false;
5161
5162         /* Check if ETS configuration has changed */
5163         if (memcmp(&new_cfg->etscfg,
5164                    &old_cfg->etscfg,
5165                    sizeof(new_cfg->etscfg))) {
5166                 /* If Priority Table has changed reconfig is needed */
5167                 if (memcmp(&new_cfg->etscfg.prioritytable,
5168                            &old_cfg->etscfg.prioritytable,
5169                            sizeof(new_cfg->etscfg.prioritytable))) {
5170                         need_reconfig = true;
5171                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5172                 }
5173
5174                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5175                            &old_cfg->etscfg.tcbwtable,
5176                            sizeof(new_cfg->etscfg.tcbwtable)))
5177                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5178
5179                 if (memcmp(&new_cfg->etscfg.tsatable,
5180                            &old_cfg->etscfg.tsatable,
5181                            sizeof(new_cfg->etscfg.tsatable)))
5182                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5183         }
5184
5185         /* Check if PFC configuration has changed */
5186         if (memcmp(&new_cfg->pfc,
5187                    &old_cfg->pfc,
5188                    sizeof(new_cfg->pfc))) {
5189                 need_reconfig = true;
5190                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5191         }
5192
5193         /* Check if APP Table has changed */
5194         if (memcmp(&new_cfg->app,
5195                    &old_cfg->app,
5196                    sizeof(new_cfg->app))) {
5197                 need_reconfig = true;
5198                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5199         }
5200
5201         dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5202                 need_reconfig);
5203         return need_reconfig;
5204 }
5205
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Refreshes the cached DCBX configuration from firmware and, if the
 * local config changed in a way that requires it, quiesces all VSIs,
 * reconfigures DCB, and resumes traffic.
 *
 * Returns 0 when no action was needed, or the status of the firmware
 * queries / port-Tx resume.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev,
		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"%s: LLDP event mib type %s\n", __func__,
		type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration (so we can diff against the new one) */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	/* drop dcbnl app entries that are no longer in the new config */
	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's Tx queues to be disabled */
	ret = i40e_pf_wait_txq_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
	}

exit:
	return ret;
}
5307 #endif /* CONFIG_I40E_DCB */
5308
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in the rtnl lock so userland-triggered resets
 * are serialized against other netdev configuration changes.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
5321
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		/* NOTE(review): vf_id comes from firmware event data and
		 * indexes pf->vf without a bounds check against the number
		 * of allocated VFs — confirm firmware only raises this
		 * event for queues owned by this PF's VFs
		 */
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
5357
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the __I40E_SERVICE_SCHED bit so the service task can be
 * scheduled again.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	/* the service task must have been scheduled for us to be here */
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5370
5371 /**
5372  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5373  * @pf: board private structure
5374  **/
5375 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5376 {
5377         u32 val, fcnt_prog;
5378
5379         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5380         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5381         return fcnt_prog;
5382 }
5383
5384 /**
5385  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5386  * @pf: board private structure
5387  **/
5388 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5389 {
5390         u32 val, fcnt_prog;
5391
5392         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5393         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5394                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5395                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5396         return fcnt_prog;
5397 }
5398
5399 /**
5400  * i40e_get_global_fd_count - Get total FD filters programmed on device
5401  * @pf: board private structure
5402  **/
5403 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5404 {
5405         u32 val, fcnt_prog;
5406
5407         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5408         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5409                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5410                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5411         return fcnt_prog;
5412 }
5413
/**
 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
 * @pf: board private structure
 *
 * Re-enables sideband (SB) and/or ATR Flow Director filtering if they
 * were auto-disabled and the filter table has regained enough headroom.
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	/* a table flush is in progress; let it finish first */
	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if, FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR
	 * (ATR needs twice the headroom that sideband does)
	 */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}
5448
/* minimum seconds between FD table flushes */
#define I40E_MIN_FD_FLUSH_INTERVAL 10
/* seconds to keep ATR off after a flush when SB rules dominate */
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the hardware Flow Director filter table, then re-programs the
 * sideband filters and (unless the table is too full of SB rules)
 * re-enables ATR.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	/* nothing to do if neither FD mode is enabled */
	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	/* rate-limit flushes to once per I40E_MIN_FD_FLUSH_INTERVAL secs */
	if (time_after(jiffies, pf->fd_flush_timestamp +
				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
		/* If the flush is happening too quick and we have mostly
		 * SB rules we should not re-enable ATR for some time.
		 */
		min_flush_time = pf->fd_flush_timestamp
				+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

		if (!(time_after(jiffies, min_flush_time)) &&
		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
			disable_atr = true;
		}

		pf->fd_flush_timestamp = jiffies;
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		/* flush all filters */
		wr32(&pf->hw, I40E_PFQF_CTL_1,
		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
		i40e_flush(&pf->hw);
		pf->fd_flush_cnt++;
		pf->fd_add_err = 0;
		do {
			/* Check FD flush status every 5-6msec */
			usleep_range(5000, 6000);
			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
				break;
		} while (flush_wait_retry--);
		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
			/* hardware never cleared the flush bit; give up */
			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
		} else {
			/* replay sideband filters */
			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
			if (!disable_atr)
				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
		}
	}
}
5508
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 *
 * Returns the number of automatically added (ATR) flow director filters,
 * computed as the total filters currently programmed in hardware minus
 * the sideband filters tracked in fdir_pf_active_filters.
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
5517
/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first
 * filter miss error on Rx queue 0. Accumulating enough error messages
 * before reacting will make sure we don't cause a flush too often.
 */
5523 #define I40E_MAX_FD_PROGRAM_ERROR 256
5524
5525 /**
5526  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5527  * @pf: board private structure
5528  **/
5529 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5530 {
5531
5532         /* if interface is down do nothing */
5533         if (test_bit(__I40E_DOWN, &pf->state))
5534                 return;
5535
5536         if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5537                 return;
5538
5539         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5540                 i40e_fdir_flush_and_replay(pf);
5541
5542         i40e_fdir_check_and_reenable(pf);
5543
5544 }
5545
5546 /**
5547  * i40e_vsi_link_event - notify VSI of a link event
5548  * @vsi: vsi to be notified
5549  * @link_up: link up or down
5550  **/
5551 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5552 {
5553         if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5554                 return;
5555
5556         switch (vsi->type) {
5557         case I40E_VSI_MAIN:
5558 #ifdef I40E_FCOE
5559         case I40E_VSI_FCOE:
5560 #endif
5561                 if (!vsi->netdev || !vsi->netdev_registered)
5562                         break;
5563
5564                 if (link_up) {
5565                         netif_carrier_on(vsi->netdev);
5566                         netif_tx_wake_all_queues(vsi->netdev);
5567                 } else {
5568                         netif_carrier_off(vsi->netdev);
5569                         netif_tx_stop_all_queues(vsi->netdev);
5570                 }
5571                 break;
5572
5573         case I40E_VSI_SRIOV:
5574         case I40E_VSI_VMDQ2:
5575         case I40E_VSI_CTRL:
5576         case I40E_VSI_MIRROR:
5577         default:
5578                 /* there is no notification for other VSIs */
5579                 break;
5580         }
5581 }
5582
5583 /**
5584  * i40e_veb_link_event - notify elements on the veb of a link event
5585  * @veb: veb to be notified
5586  * @link_up: link up or down
5587  **/
5588 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5589 {
5590         struct i40e_pf *pf;
5591         int i;
5592
5593         if (!veb || !veb->pf)
5594                 return;
5595         pf = veb->pf;
5596
5597         /* depth first... */
5598         for (i = 0; i < I40E_MAX_VEB; i++)
5599                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5600                         i40e_veb_link_event(pf->veb[i], link_up);
5601
5602         /* ... now the local VSIs */
5603         for (i = 0; i < pf->num_alloc_vsi; i++)
5604                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5605                         i40e_vsi_link_event(pf->vsi[i], link_up);
5606 }
5607
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Re-reads the link status from firmware and, when the link state or
 * speed changed (or the carrier is out of sync), prints a link message,
 * propagates the event down the switch tree, notifies any VFs, and
 * retunes the PTP clock increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	new_link = i40e_get_link_status(&pf->hw);
	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	/* bail when nothing moved and the carrier already agrees with
	 * the new state
	 * NOTE(review): netif_carrier_ok(vsi->netdev) assumes the LAN
	 * VSI always has a netdev by the time link events fire - confirm
	 */
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	/* keep the VFs informed of PF link changes */
	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	/* the PTP clock increment depends on link speed */
	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
5649
/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		/* skip unallocated, down, or carrier-off VSIs */
		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		/* arm the hang check on every Tx ring; count the rings
		 * that were already armed on a previous pass
		 */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		/* fire a software interrupt so the rings get cleaned even
		 * if a hardware interrupt was dropped
		 */
		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				/* legacy/MSI mode: one shared cause register */
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
			} else {
				/* MSI-X: trigger each q_vector of the VSI */
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
				      I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}
5711
5712 /**
5713  * i40e_watchdog_subtask - periodic checks not using event driven response
5714  * @pf: board private structure
5715  **/
5716 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5717 {
5718         int i;
5719
5720         /* if interface is down do nothing */
5721         if (test_bit(__I40E_DOWN, &pf->state) ||
5722             test_bit(__I40E_CONFIG_BUSY, &pf->state))
5723                 return;
5724
5725         /* make sure we don't do these things too often */
5726         if (time_before(jiffies, (pf->service_timer_previous +
5727                                   pf->service_timer_period)))
5728                 return;
5729         pf->service_timer_previous = jiffies;
5730
5731         i40e_check_hang_subtask(pf);
5732         i40e_link_event(pf);
5733
5734         /* Update the stats for active netdevs so the network stack
5735          * can look at updated numbers whenever it cares to
5736          */
5737         for (i = 0; i < pf->num_alloc_vsi; i++)
5738                 if (pf->vsi[i] && pf->vsi[i]->netdev)
5739                         i40e_update_stats(pf->vsi[i]);
5740
5741         /* Update the stats for the active switching components */
5742         for (i = 0; i < I40E_MAX_VEB; i++)
5743                 if (pf->veb[i])
5744                         i40e_update_veb_stats(pf->veb[i]);
5745
5746         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5747 }
5748
5749 /**
5750  * i40e_reset_subtask - Set up for resetting the device and driver
5751  * @pf: board private structure
5752  **/
5753 static void i40e_reset_subtask(struct i40e_pf *pf)
5754 {
5755         u32 reset_flags = 0;
5756
5757         rtnl_lock();
5758         if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5759                 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5760                 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5761         }
5762         if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5763                 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5764                 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5765         }
5766         if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5767                 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5768                 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5769         }
5770         if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5771                 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5772                 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5773         }
5774         if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5775                 reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5776                 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5777         }
5778
5779         /* If there's a recovery already waiting, it takes
5780          * precedence before starting a new reset sequence.
5781          */
5782         if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5783                 i40e_handle_reset_warning(pf);
5784                 goto unlock;
5785         }
5786
5787         /* If we're already down or resetting, just bail */
5788         if (reset_flags &&
5789             !test_bit(__I40E_DOWN, &pf->state) &&
5790             !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5791                 i40e_do_reset(pf, reset_flags);
5792
5793 unlock:
5794         rtnl_unlock();
5795 }
5796
5797 /**
5798  * i40e_handle_link_event - Handle link event
5799  * @pf: board private structure
5800  * @e: event info posted on ARQ
5801  **/
5802 static void i40e_handle_link_event(struct i40e_pf *pf,
5803                                    struct i40e_arq_event_info *e)
5804 {
5805         struct i40e_hw *hw = &pf->hw;
5806         struct i40e_aqc_get_link_status *status =
5807                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5808
5809         /* save off old link status information */
5810         hw->phy.link_info_old = hw->phy.link_info;
5811
5812         /* Do a new status request to re-enable LSE reporting
5813          * and load new status information into the hw struct
5814          * This completely ignores any state information
5815          * in the ARQ event info, instead choosing to always
5816          * issue the AQ update link status command.
5817          */
5818         i40e_link_event(pf);
5819
5820         /* check for unqualified module, if link is down */
5821         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5822             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5823             (!(status->link_info & I40E_AQ_LINK_UP)))
5824                 dev_err(&pf->pdev->dev,
5825                         "The driver failed to link because an unqualified module was detected.\n");
5826 }
5827
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Clears any latched AdminQ error indications, drains pending receive
 * (ARQ) events and dispatches them by opcode, then re-enables the
 * AdminQ interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications on the receive (ARQ) side */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* write back only when an error bit was actually cleared */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same error handling for the send (ASQ) side */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* drain pending ARQ events, bounded by adminq_work_limit */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* mailbox message from one of our VFs */
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* DCB reconfiguration may touch netdevs */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
5945
5946 /**
5947  * i40e_verify_eeprom - make sure eeprom is good to use
5948  * @pf: board private structure
5949  **/
5950 static void i40e_verify_eeprom(struct i40e_pf *pf)
5951 {
5952         int err;
5953
5954         err = i40e_diag_eeprom_test(&pf->hw);
5955         if (err) {
5956                 /* retry in case of garbage read */
5957                 err = i40e_diag_eeprom_test(&pf->hw);
5958                 if (err) {
5959                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5960                                  err);
5961                         set_bit(__I40E_BAD_EEPROM, &pf->state);
5962                 }
5963         }
5964
5965         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5966                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5967                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5968         }
5969 }
5970
5971 /**
5972  * i40e_enable_pf_switch_lb
5973  * @pf: pointer to the PF structure
5974  *
5975  * enable switch loop back or die - no point in a return value
5976  **/
5977 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
5978 {
5979         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5980         struct i40e_vsi_context ctxt;
5981         int aq_ret;
5982
5983         ctxt.seid = pf->main_vsi_seid;
5984         ctxt.pf_num = pf->hw.pf_id;
5985         ctxt.vf_num = 0;
5986         aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5987         if (aq_ret) {
5988                 dev_info(&pf->pdev->dev,
5989                          "%s couldn't get PF vsi config, err %d, aq_err %d\n",
5990                          __func__, aq_ret, pf->hw.aq.asq_last_status);
5991                 return;
5992         }
5993         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5994         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5995         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5996
5997         aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5998         if (aq_ret) {
5999                 dev_info(&pf->pdev->dev,
6000                          "%s: update vsi switch failed, aq_err=%d\n",
6001                          __func__, vsi->back->hw.aq.asq_last_status);
6002         }
6003 }
6004
6005 /**
6006  * i40e_disable_pf_switch_lb
6007  * @pf: pointer to the PF structure
6008  *
6009  * disable switch loop back or die - no point in a return value
6010  **/
6011 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6012 {
6013         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6014         struct i40e_vsi_context ctxt;
6015         int aq_ret;
6016
6017         ctxt.seid = pf->main_vsi_seid;
6018         ctxt.pf_num = pf->hw.pf_id;
6019         ctxt.vf_num = 0;
6020         aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6021         if (aq_ret) {
6022                 dev_info(&pf->pdev->dev,
6023                          "%s couldn't get PF vsi config, err %d, aq_err %d\n",
6024                          __func__, aq_ret, pf->hw.aq.asq_last_status);
6025                 return;
6026         }
6027         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6028         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6029         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6030
6031         aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6032         if (aq_ret) {
6033                 dev_info(&pf->pdev->dev,
6034                          "%s: update vsi switch failed, aq_err=%d\n",
6035                          __func__, vsi->back->hw.aq.asq_last_status);
6036         }
6037 }
6038
/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	/* NOTE(review): bitwise test relies on BRIDGE_MODE_VEPA being a
	 * single set bit and bridge_mode only ever holding VEB or VEPA
	 * here; an equality test would be clearer - confirm no other
	 * values (e.g. BRIDGE_MODE_UNDEF) can reach this point
	 */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
6058
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, a negative errno or AQ error code on failure.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-main owner VSI is re-attached under the main VSI's uplink
	 * until the VEB exists again
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			/* point the VSI at the freshly created VEB's seid */
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6139
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries the firmware for function capabilities (which it loads into
 * pf->hw.func_caps as a side effect), applies fixups for older firmware,
 * and clamps num_vsis on A0 silicon.
 *
 * Returns 0 on success, -ENOMEM or -ENODEV on failure.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	/* retry loop: on an AQ ENOMEM the firmware tells us the required
	 * buffer size via data_size and we try again with a bigger buffer
	 * NOTE(review): if err is non-zero while asq_last_status is OK
	 * this loop retries with the same size - confirm that combination
	 * cannot occur
	 */
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	/* firmware older than 2.22 under-reports MSI-X vectors by one */
	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

/* minimum VSIs needed: LAN + optional FCoE + one per VF */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6204
6205 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6206
6207 /**
6208  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6209  * @pf: board private structure
6210  **/
6211 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6212 {
6213         struct i40e_vsi *vsi;
6214         int i;
6215
6216         /* quick workaround for an NVM issue that leaves a critical register
6217          * uninitialized
6218          */
6219         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6220                 static const u32 hkey[] = {
6221                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6222                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6223                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6224                         0x95b3a76d};
6225
6226                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6227                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6228         }
6229
6230         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6231                 return;
6232
6233         /* find existing VSI and see if it needs configuring */
6234         vsi = NULL;
6235         for (i = 0; i < pf->num_alloc_vsi; i++) {
6236                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6237                         vsi = pf->vsi[i];
6238                         break;
6239                 }
6240         }
6241
6242         /* create a new VSI if none exists */
6243         if (!vsi) {
6244                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6245                                      pf->vsi[pf->lan_vsi]->seid, 0);
6246                 if (!vsi) {
6247                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6248                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6249                         return;
6250                 }
6251         }
6252
6253         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6254 }
6255
6256 /**
6257  * i40e_fdir_teardown - release the Flow Director resources
6258  * @pf: board private structure
6259  **/
6260 static void i40e_fdir_teardown(struct i40e_pf *pf)
6261 {
6262         int i;
6263
6264         i40e_fdir_filter_exit(pf);
6265         for (i = 0; i < pf->num_alloc_vsi; i++) {
6266                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6267                         i40e_vsi_release(pf->vsi[i]);
6268                         break;
6269                 }
6270         }
6271 }
6272
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset: quiesces all
 * VSIs, invalidates their cached seids (the HW may assign new ones on
 * rebuild), and shuts down the AdminQ and HMC.
  **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* only one reset recovery may be in flight at a time */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* invalidate cached seids; they are reassigned during rebuild */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
6309
6310 /**
6311  * i40e_send_version - update firmware with driver version
6312  * @pf: PF struct
6313  */
6314 static void i40e_send_version(struct i40e_pf *pf)
6315 {
6316         struct i40e_driver_version dv;
6317
6318         dv.major_version = DRV_VERSION_MAJOR;
6319         dv.minor_version = DRV_VERSION_MINOR;
6320         dv.build_version = DRV_VERSION_BUILD;
6321         dv.subbuild_version = 0;
6322         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6323         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6324 }
6325
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	/* if the driver is going down anyway, skip the rebuild */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	/* rebuild the LAN HMC contexts (FCoE context/filter counts come
	 * from the pf and are zero when FCoE is not in use)
	 */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	ret = i40e_init_pf_fcoe(pf);
	if (ret)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * try to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			/* uplink_seid == 0 marks an orphaned VEB */
			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* for firmware older than 4.33, restart link autonegotiation after
	 * the reset settles (presumably newer FW handles this itself)
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
				 pf->hw.aq.asq_last_status);
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* reset each VF so it picks up the rebuilt resources */
	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
6501
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	/* quiesce, then do a full PF reset and rebuild in place */
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}
6514
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* only run when the irq handler has flagged a pending MDD event */
	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* decode the offending function/queue from the Tx MDET reg */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		/* queue number is absolute; convert to this PF's range */
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		/* write-to-clear the latched event */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		/* same decode for the Rx side */
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* check whether the event was caused by the PF itself */
	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		/* repeat offenders get disabled until an admin intervenes */
		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
6622
6623 #ifdef CONFIG_I40E_VXLAN
6624 /**
6625  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6626  * @pf: board private structure
6627  **/
6628 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6629 {
6630         struct i40e_hw *hw = &pf->hw;
6631         i40e_status ret;
6632         __be16 port;
6633         int i;
6634
6635         if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6636                 return;
6637
6638         pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6639
6640         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6641                 if (pf->pending_vxlan_bitmap & (1 << i)) {
6642                         pf->pending_vxlan_bitmap &= ~(1 << i);
6643                         port = pf->vxlan_ports[i];
6644                         if (port)
6645                                 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6646                                                      I40E_AQC_TUNNEL_TYPE_VXLAN,
6647                                                      NULL, NULL);
6648                         else
6649                                 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6650
6651                         if (ret) {
6652                                 dev_info(&pf->pdev->dev,
6653                                          "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
6654                                          port ? "add" : "delete",
6655                                          ntohs(port), i, ret,
6656                                          pf->hw.aq.asq_last_status);
6657                                 pf->vxlan_ports[i] = 0;
6658                         }
6659                 }
6660         }
6661 }
6662
6663 #endif
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	/* run each subtask in turn; each one checks its own pending flags */
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
	i40e_sync_vxlan_filters_subtask(pf);
#endif
	i40e_clean_adminq_subtask(pf);

	/* clear the service-task-running state so it can be scheduled again */
	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}
6704
6705 /**
6706  * i40e_service_timer - timer callback
6707  * @data: pointer to PF struct
6708  **/
6709 static void i40e_service_timer(unsigned long data)
6710 {
6711         struct i40e_pf *pf = (struct i40e_pf *)data;
6712
6713         mod_timer(&pf->service_timer,
6714                   round_jiffies(jiffies + pf->service_timer_period));
6715         i40e_service_event_schedule(pf);
6716 }
6717
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 *
 * Sets alloc_queue_pairs, num_desc, and num_q_vectors from the PF's
 * per-type budgets.  Returns 0 on success, -ENODATA for an unknown
 * VSI type.
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		/* without MSI-X everything shares the single vector */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		/* flow director needs only one queue pair and vector */
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		/* VF VSIs get their vectors assigned by the VF setup path */
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
6774
6775 /**
6776  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6777  * @type: VSI pointer
6778  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6779  *
6780  * On error: returns error code (negative)
6781  * On success: returns 0
6782  **/
6783 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6784 {
6785         int size;
6786         int ret = 0;
6787
6788         /* allocate memory for both Tx and Rx ring pointers */
6789         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6790         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6791         if (!vsi->tx_rings)
6792                 return -ENOMEM;
6793         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6794
6795         if (alloc_qvectors) {
6796                 /* allocate memory for q_vector pointers */
6797                 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6798                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6799                 if (!vsi->q_vectors) {
6800                         ret = -ENOMEM;
6801                         goto err_vectors;
6802                 }
6803         }
6804         return ret;
6805
6806 err_vectors:
6807         kfree(vsi->tx_rings);
6808         return ret;
6809 }
6810
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrap around and scan the slots before next_vsi */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* remember where to start the next search */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* initialize the new VSI with safe defaults; it starts DOWN */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	/* only the main VSI gets the full RSS table */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* roll next_vsi back to the slot we failed to fill */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
6893
6894 /**
6895  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6896  * @type: VSI pointer
6897  * @free_qvectors: a bool to specify if q_vectors need to be freed.
6898  *
6899  * On error: returns error code (negative)
6900  * On success: returns 0
6901  **/
6902 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6903 {
6904         /* free the ring and vector containers */
6905         if (free_qvectors) {
6906                 kfree(vsi->q_vectors);
6907                 vsi->q_vectors = NULL;
6908         }
6909         kfree(vsi->tx_rings);
6910         vsi->tx_rings = NULL;
6911         vsi->rx_rings = NULL;
6912 }
6913
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Releases the VSI's queue/vector lumps, clears its slot in pf->vsi[],
 * and frees the structure.  Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no backref was never registered with the PF;
	 * nothing to unhook, just free it
	 */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		/* slot already empty: inconsistent bookkeeping, warn and
		 * free the orphaned structure anyway
		 */
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		/* slot holds a different VSI: refuse to free either one */
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	/* let the allocator reuse this slot first */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
6963
6964 /**
6965  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6966  * @vsi: the VSI being cleaned
6967  **/
6968 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6969 {
6970         int i;
6971
6972         if (vsi->tx_rings && vsi->tx_rings[0]) {
6973                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6974                         kfree_rcu(vsi->tx_rings[i], rcu);
6975                         vsi->tx_rings[i] = NULL;
6976                         vsi->rx_rings[i] = NULL;
6977                 }
6978         }
6979 }
6980
6981 /**
6982  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6983  * @vsi: the VSI being configured
6984  **/
6985 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6986 {
6987         struct i40e_ring *tx_ring, *rx_ring;
6988         struct i40e_pf *pf = vsi->back;
6989         int i;
6990
6991         /* Set basic values in the rings to be used later during open() */
6992         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6993                 /* allocate space for both Tx and Rx in one shot */
6994                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6995                 if (!tx_ring)
6996                         goto err_out;
6997
6998                 tx_ring->queue_index = i;
6999                 tx_ring->reg_idx = vsi->base_queue + i;
7000                 tx_ring->ring_active = false;
7001                 tx_ring->vsi = vsi;
7002                 tx_ring->netdev = vsi->netdev;
7003                 tx_ring->dev = &pf->pdev->dev;
7004                 tx_ring->count = vsi->num_desc;
7005                 tx_ring->size = 0;
7006                 tx_ring->dcb_tc = 0;
7007                 vsi->tx_rings[i] = tx_ring;
7008
7009                 rx_ring = &tx_ring[1];
7010                 rx_ring->queue_index = i;
7011                 rx_ring->reg_idx = vsi->base_queue + i;
7012                 rx_ring->ring_active = false;
7013                 rx_ring->vsi = vsi;
7014                 rx_ring->netdev = vsi->netdev;
7015                 rx_ring->dev = &pf->pdev->dev;
7016                 rx_ring->count = vsi->num_desc;
7017                 rx_ring->size = 0;
7018                 rx_ring->dcb_tc = 0;
7019                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7020                         set_ring_16byte_desc_enabled(rx_ring);
7021                 else
7022                         clear_ring_16byte_desc_enabled(rx_ring);
7023                 vsi->rx_rings[i] = rx_ring;
7024         }
7025
7026         return 0;
7027
7028 err_out:
7029         i40e_vsi_clear_rings(vsi);
7030         return -ENOMEM;
7031 }
7032
7033 /**
7034  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7035  * @pf: board private structure
7036  * @vectors: the number of MSI-X vectors to request
7037  *
7038  * Returns the number of vectors reserved, or error
7039  **/
7040 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7041 {
7042         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7043                                         I40E_MIN_MSIX, vectors);
7044         if (vectors < 0) {
7045                 dev_info(&pf->pdev->dev,
7046                          "MSI-X vector reservation failed: %d\n", vectors);
7047                 vectors = 0;
7048         }
7049
7050         return vectors;
7051 }
7052
7053 /**
7054  * i40e_init_msix - Setup the MSIX capability
7055  * @pf: board private structure
7056  *
7057  * Work with the OS to set up the MSIX vectors needed.
7058  *
7059  * Returns the number of vectors reserved or negative on failure
7060  **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler (AdminQ, errors) */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues; one per CPU,
	 * bounded by what is still available
	 */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director; if none is left
	 * the feature is turned off rather than starving the LAN queues
	 */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			v_budget++;
			vectors_left--;
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? fall back to a single vector
	 * if fewer than num_fcoe_qps remain
	 */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		/* if we're short on vectors for what's desired, we limit
		 * the queues per vmdq.  If this is still more than are
		 * available, the user will need to change the number of
		 * queues/vectors used by the PF later with the ethtool
		 * channels command
		 */
		if (vmdq_vecs < vmdq_vecs_wanted)
			pf->num_vmdq_qps = 1;
		pf->num_vmdq_msix = pf->num_vmdq_qps;

		v_budget += vmdq_vecs;
		vectors_left -= vmdq_vecs;
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	/* ask the kernel for the budgeted count; may return fewer */
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (v_actual < I40E_MIN_MSIX) {
		/* not even the minimum was granted; give up on MSI-X
		 * entirely so the caller can fall back to MSI/legacy
		 */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use: one misc + one LAN vector,
		 * no VMDq at all
		 */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		int vec;

		/* reserve the misc vector; vec is what remains for queues */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;

		/* partition out the remaining vectors; with VMDq forced to
		 * one vector above, the cases below split the rest between
		 * LAN (and FCoE when built in)
		 */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE
			 * NOTE(review): when FCoE is built in but not
			 * enabled, num_lan_msix keeps its earlier value
			 * here — confirm that is intended
			 */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			/* give the rest to the PF */
			pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
			break;
		}
	}

	/* disable features that ended up with no vectors at all */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return v_actual;
}
7236
7237 /**
7238  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7239  * @vsi: the VSI being configured
7240  * @v_idx: index of the vector in the vsi struct
7241  *
7242  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7243  **/
7244 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7245 {
7246         struct i40e_q_vector *q_vector;
7247
7248         /* allocate q_vector */
7249         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7250         if (!q_vector)
7251                 return -ENOMEM;
7252
7253         q_vector->vsi = vsi;
7254         q_vector->v_idx = v_idx;
7255         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7256         if (vsi->netdev)
7257                 netif_napi_add(vsi->netdev, &q_vector->napi,
7258                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7259
7260         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7261         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7262
7263         /* tie q_vector and vsi together */
7264         vsi->q_vectors[v_idx] = q_vector;
7265
7266         return 0;
7267 }
7268
7269 /**
7270  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7271  * @vsi: the VSI being configured
7272  *
7273  * We allocate one q_vector per queue interrupt.  If allocation fails we
7274  * return -ENOMEM.
7275  **/
7276 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7277 {
7278         struct i40e_pf *pf = vsi->back;
7279         int v_idx, num_q_vectors;
7280         int err;
7281
7282         /* if not MSIX, give the one vector only to the LAN VSI */
7283         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7284                 num_q_vectors = vsi->num_q_vectors;
7285         else if (vsi == pf->vsi[pf->lan_vsi])
7286                 num_q_vectors = 1;
7287         else
7288                 return -EINVAL;
7289
7290         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7291                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7292                 if (err)
7293                         goto err_out;
7294         }
7295
7296         return 0;
7297
7298 err_out:
7299         while (v_idx--)
7300                 i40e_free_q_vector(vsi, v_idx);
7301
7302         return err;
7303 }
7304
7305 /**
7306  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7307  * @pf: board private structure to initialize
7308  **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	/* try MSI-X first; on failure strip every capability that needs
	 * multiple vectors and recompute queue usage for a single vector
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED   |
#endif
				       I40E_FLAG_RSS_ENABLED    |
				       I40E_FLAG_DCB_CAPABLE    |
				       I40E_FLAG_SRIOV_ENABLED  |
				       I40E_FLAG_FD_SB_ENABLED  |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	/* next preference is MSI; whether MSI succeeds or we end up on a
	 * legacy IRQ, exactly one vector is available from here on
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking: a lump tracker with one u16
	 * slot per granted vector
	 */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7363
7364 /**
7365  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7366  * @pf: board private structure
7367  *
7368  * This sets up the handler for MSIX 0, which is used to manage the
7369  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7370  * when in MSI or Legacy interrupt mode.
7371  **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			/* collapse any request_irq failure to -EFAULT */
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector: terminate its queue list
	 * and set its ITR register
	 */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* make sure the writes have landed before enabling the interrupt */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
7403
7404 /**
7405  * i40e_config_rss - Prepare for RSS if used
7406  * @pf: board private structure
7407  **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;
	u32 reg_val;

	/* seed the hash key registers with a random key */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes; HENA is a
	 * 64-bit mask split across two 32-bit registers
	 */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* cannot spread over more queues than the VSI actually owns */
	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);

	/* Check capability and Set table size and register per hw expectation*/
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	if (pf->rss_table_size == 512)
		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
	else
		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == vsi->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
7462
7463 /**
7464  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7465  * @pf: board private structure
7466  * @queue_count: the requested queue count for rss.
7467  *
7468  * returns 0 if rss is not enabled, if enabled returns the final rss queue
7469  * count which may be different from the requested queue count.
7470  **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	/* clamp the request to the hardware RSS maximum */
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	/* NOTE(review): the change detection compares the raw queue_count,
	 * not the clamped new_rss_size — confirm behavior is intended when
	 * queue_count exceeds rss_size_max.
	 */
	if (queue_count != vsi->num_queue_pairs) {
		vsi->req_queue_pairs = queue_count;
		/* a full PF reset/rebuild is needed to change queue counts */
		i40e_prep_for_reset(pf);

		pf->rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true);
		i40e_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
	return pf->rss_size;
}
7493
7494 /**
7495  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7496  * @pf: board private structure
7497  **/
7498 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7499 {
7500         i40e_status status;
7501         bool min_valid, max_valid;
7502         u32 max_bw, min_bw;
7503
7504         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7505                                            &min_valid, &max_valid);
7506
7507         if (!status) {
7508                 if (min_valid)
7509                         pf->npar_min_bw = min_bw;
7510                 if (max_valid)
7511                         pf->npar_max_bw = max_bw;
7512         }
7513
7514         return status;
7515 }
7516
7517 /**
7518  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7519  * @pf: board private structure
7520  **/
7521 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7522 {
7523         struct i40e_aqc_configure_partition_bw_data bw_data;
7524         i40e_status status;
7525
7526         /* Set the valid bit for this PF */
7527         bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
7528         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7529         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7530
7531         /* Set the new bandwidths */
7532         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7533
7534         return status;
7535 }
7536
7537 /**
7538  * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7539  * @pf: board private structure
7540  **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	/* the NVM commit is only valid from partition 1 */
	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %d: aq_err %d\n",
			 ret, last_aq_status);
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
			 ret, last_aq_status);
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %d: aq_err %d\n",
			 ret, last_aq_status);
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %d aq_err %d\n",
			 ret, last_aq_status);
bw_commit_out:

	return ret;
}
7615
7616 /**
7617  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7618  * @pf: board private structure to initialize
7619  *
7620  * i40e_sw_init initializes the Adapter private data structure.
7621  * Fields are initialized based on PCI device information and
7622  * OS network device settings (MTU size).
7623  **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* default message level, optionally overridden by the module
	 * "debug" parameter
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* packet-split Rx only with an IOMMU present; otherwise 1-buffer */
	if (iommu_present(&pci_bus_type))
		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
	else
		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* FD sideband is not supported alongside MFP */
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef I40E_FCOE
	/* NOTE(review): a non-zero err here is only logged, but it remains
	 * in err and can be returned from this function unless overwritten
	 * below — confirm that is intended
	 */
	err = i40e_init_pf_fcoe(pf);
	if (err)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	/* SR-IOV is only offered on the first partition */
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* set up queue assignment tracking: one u16 slot per Tx queue pair */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
7743
7744 /**
7745  * i40e_set_ntuple - set the ntuple feature flag and take action
7746  * @pf: board private structure to initialize
7747  * @features: the feature set that the stack is suggesting
7748  *
7749  * returns a bool to indicate if reset needs to happen
7750  **/
7751 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7752 {
7753         bool need_reset = false;
7754
7755         /* Check if Flow Director n-tuple support was enabled or disabled.  If
7756          * the state changed, we need to reset.
7757          */
7758         if (features & NETIF_F_NTUPLE) {
7759                 /* Enable filters and mark for reset */
7760                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7761                         need_reset = true;
7762                 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7763         } else {
7764                 /* turn off filters, mark for reset and clear SW filter list */
7765                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7766                         need_reset = true;
7767                         i40e_fdir_filter_exit(pf);
7768                 }
7769                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7770                 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7771                 /* reset fd counters */
7772                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7773                 pf->fdir_pf_active_filters = 0;
7774                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7775                 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7776                 /* if ATR was auto disabled it can be re-enabled. */
7777                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7778                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7779                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7780         }
7781         return need_reset;
7782 }
7783
7784 /**
7785  * i40e_set_features - set the netdev feature flags
7786  * @netdev: ptr to the netdev being adjusted
7787  * @features: the feature set that the stack is suggesting
7788  **/
7789 static int i40e_set_features(struct net_device *netdev,
7790                              netdev_features_t features)
7791 {
7792         struct i40e_netdev_priv *np = netdev_priv(netdev);
7793         struct i40e_vsi *vsi = np->vsi;
7794         struct i40e_pf *pf = vsi->back;
7795         bool need_reset;
7796
7797         if (features & NETIF_F_HW_VLAN_CTAG_RX)
7798                 i40e_vlan_stripping_enable(vsi);
7799         else
7800                 i40e_vlan_stripping_disable(vsi);
7801
7802         need_reset = i40e_set_ntuple(pf, features);
7803
7804         if (need_reset)
7805                 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
7806
7807         return 0;
7808 }
7809
7810 #ifdef CONFIG_I40E_VXLAN
7811 /**
7812  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
7813  * @pf: board private structure
7814  * @port: The UDP port to look up
7815  *
7816  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7817  **/
7818 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7819 {
7820         u8 i;
7821
7822         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7823                 if (pf->vxlan_ports[i] == port)
7824                         return i;
7825         }
7826
7827         return i;
7828 }
7829
7830 /**
7831  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
7832  * @netdev: This physical port's netdev
7833  * @sa_family: Socket Family that VXLAN is notifying us about
7834  * @port: New UDP port number that VXLAN started listening to
7835  **/
7836 static void i40e_add_vxlan_port(struct net_device *netdev,
7837                                 sa_family_t sa_family, __be16 port)
7838 {
7839         struct i40e_netdev_priv *np = netdev_priv(netdev);
7840         struct i40e_vsi *vsi = np->vsi;
7841         struct i40e_pf *pf = vsi->back;
7842         u8 next_idx;
7843         u8 idx;
7844
7845         if (sa_family == AF_INET6)
7846                 return;
7847
7848         idx = i40e_get_vxlan_port_idx(pf, port);
7849
7850         /* Check if port already exists */
7851         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7852                 netdev_info(netdev, "vxlan port %d already offloaded\n",
7853                             ntohs(port));
7854                 return;
7855         }
7856
7857         /* Now check if there is space to add the new port */
7858         next_idx = i40e_get_vxlan_port_idx(pf, 0);
7859
7860         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7861                 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
7862                             ntohs(port));
7863                 return;
7864         }
7865
7866         /* New port: add it and mark its index in the bitmap */
7867         pf->vxlan_ports[next_idx] = port;
7868         pf->pending_vxlan_bitmap |= (1 << next_idx);
7869         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7870
7871         dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
7872 }
7873
7874 /**
7875  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
7876  * @netdev: This physical port's netdev
7877  * @sa_family: Socket Family that VXLAN is notifying us about
7878  * @port: UDP port number that VXLAN stopped listening to
7879  **/
7880 static void i40e_del_vxlan_port(struct net_device *netdev,
7881                                 sa_family_t sa_family, __be16 port)
7882 {
7883         struct i40e_netdev_priv *np = netdev_priv(netdev);
7884         struct i40e_vsi *vsi = np->vsi;
7885         struct i40e_pf *pf = vsi->back;
7886         u8 idx;
7887
7888         if (sa_family == AF_INET6)
7889                 return;
7890
7891         idx = i40e_get_vxlan_port_idx(pf, port);
7892
7893         /* Check if port already exists */
7894         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7895                 /* if port exists, set it to 0 (mark for deletion)
7896                  * and make it pending
7897                  */
7898                 pf->vxlan_ports[idx] = 0;
7899                 pf->pending_vxlan_bitmap |= (1 << idx);
7900                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7901
7902                 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
7903                          ntohs(port));
7904         } else {
7905                 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
7906                             ntohs(port));
7907         }
7908 }
7909
7910 #endif
7911 static int i40e_get_phys_port_id(struct net_device *netdev,
7912                                  struct netdev_phys_item_id *ppid)
7913 {
7914         struct i40e_netdev_priv *np = netdev_priv(netdev);
7915         struct i40e_pf *pf = np->vsi->back;
7916         struct i40e_hw *hw = &pf->hw;
7917
7918         if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
7919                 return -EOPNOTSUPP;
7920
7921         ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
7922         memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
7923
7924         return 0;
7925 }
7926
7927 /**
7928  * i40e_ndo_fdb_add - add an entry to the hardware database
7929  * @ndm: the input from the stack
7930  * @tb: pointer to array of nladdr (unused)
7931  * @dev: the net device pointer
7932  * @addr: the MAC address entry being added
7933  * @flags: instructions from stack about fdb operation
7934  */
7935 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7936                             struct net_device *dev,
7937                             const unsigned char *addr, u16 vid,
7938                             u16 flags)
7939 {
7940         struct i40e_netdev_priv *np = netdev_priv(dev);
7941         struct i40e_pf *pf = np->vsi->back;
7942         int err = 0;
7943
7944         if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
7945                 return -EOPNOTSUPP;
7946
7947         if (vid) {
7948                 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
7949                 return -EINVAL;
7950         }
7951
7952         /* Hardware does not support aging addresses so if a
7953          * ndm_state is given only allow permanent addresses
7954          */
7955         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7956                 netdev_info(dev, "FDB only supports static addresses\n");
7957                 return -EINVAL;
7958         }
7959
7960         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
7961                 err = dev_uc_add_excl(dev, addr);
7962         else if (is_multicast_ether_addr(addr))
7963                 err = dev_mc_add_excl(dev, addr);
7964         else
7965                 err = -EINVAL;
7966
7967         /* Only return duplicate errors if NLM_F_EXCL is set */
7968         if (err == -EEXIST && !(flags & NLM_F_EXCL))
7969                 err = 0;
7970
7971         return err;
7972 }
7973
7974 #ifdef HAVE_BRIDGE_ATTRIBS
7975 /**
7976  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
7977  * @dev: the netdev being configured
7978  * @nlh: RTNL message
7979  *
7980  * Inserts a new hardware bridge if not already created and
7981  * enables the bridging mode requested (VEB or VEPA). If the
7982  * hardware bridge has already been inserted and the request
7983  * is to change the mode then that requires a PF reset to
7984  * allow rebuild of the components with required hardware
7985  * bridge mode enabled.
7986  **/
7987 static int i40e_ndo_bridge_setlink(struct net_device *dev,
7988                                    struct nlmsghdr *nlh)
7989 {
7990         struct i40e_netdev_priv *np = netdev_priv(dev);
7991         struct i40e_vsi *vsi = np->vsi;
7992         struct i40e_pf *pf = vsi->back;
7993         struct i40e_veb *veb = NULL;
7994         struct nlattr *attr, *br_spec;
7995         int i, rem;
7996
7997         /* Only for PF VSI for now */
7998         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
7999                 return -EOPNOTSUPP;
8000
8001         /* Find the HW bridge for PF VSI */
8002         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8003                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8004                         veb = pf->veb[i];
8005         }
8006
8007         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8008
8009         nla_for_each_nested(attr, br_spec, rem) {
8010                 __u16 mode;
8011
8012                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8013                         continue;
8014
8015                 mode = nla_get_u16(attr);
8016                 if ((mode != BRIDGE_MODE_VEPA) &&
8017                     (mode != BRIDGE_MODE_VEB))
8018                         return -EINVAL;
8019
8020                 /* Insert a new HW bridge */
8021                 if (!veb) {
8022                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8023                                              vsi->tc_config.enabled_tc);
8024                         if (veb) {
8025                                 veb->bridge_mode = mode;
8026                                 i40e_config_bridge_mode(veb);
8027                         } else {
8028                                 /* No Bridge HW offload available */
8029                                 return -ENOENT;
8030                         }
8031                         break;
8032                 } else if (mode != veb->bridge_mode) {
8033                         /* Existing HW bridge but different mode needs reset */
8034                         veb->bridge_mode = mode;
8035                         i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
8036                         break;
8037                 }
8038         }
8039
8040         return 0;
8041 }
8042
/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed through to ndo_dflt_bridge_getlink()
 *
 * Return the mode in which the hardware bridge is operating in
 * i.e VEB or VEPA.
 *
 * Returns -EOPNOTSUPP for non-PF VSIs, 0 when no HW bridge exists
 * (nothing to report), otherwise the result of the default getlink
 * helper filled with the VEB's bridge mode.
 **/
#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask, int nlflags)
#else
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev, int nlflags)
#endif /* HAVE_BRIDGE_FILTER */
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	/* no HW bridge inserted yet: nothing to report */
	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       nlflags);
}
8085 #endif /* HAVE_BRIDGE_ATTRIBS */
8086
/* Netdev callback table mapping the stack's net_device operations onto
 * their i40e implementations; installed on each VSI's netdev in
 * i40e_config_netdev().  Optional groups (netpoll, FCoE, VXLAN, bridge
 * attributes) are compiled in only when the matching config/feature
 * macro is set.
 */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
#ifdef HAVE_BRIDGE_ATTRIBS
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
#endif /* HAVE_BRIDGE_ATTRIBS */
};
8126
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the netdev for @vsi, wires up features, MAC filters,
 * TC information and the netdev_ops/ethtool_ops tables.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	/* link netdev and VSI to each other via the private area */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* offloads available on encapsulated (tunnel) traffic */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_TSO;

	netdev->features = NETIF_F_SG                  |
			   NETIF_F_IP_CSUM             |
			   NETIF_F_SCTP_CSUM           |
			   NETIF_F_HIGHDMA             |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM           |
			   NETIF_F_TSO                 |
			   NETIF_F_TSO_ECN             |
			   NETIF_F_TSO6                |
			   NETIF_F_RXCSUM              |
			   NETIF_F_RXHASH              |
			   0;

	/* ntuple (Flow Director) filtering is unavailable in MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		/* main VSI uses the port's permanent MAC address */
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default a MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}
	/* every VSI accepts broadcast */
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}
8220
8221 /**
8222  * i40e_vsi_delete - Delete a VSI from the switch
8223  * @vsi: the VSI being removed
8224  *
8225  * Returns 0 on success, negative value on failure
8226  **/
8227 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8228 {
8229         /* remove default VSI is not allowed */
8230         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8231                 return;
8232
8233         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8234 }
8235
8236 /**
8237  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8238  * @vsi: the VSI being queried
8239  *
8240  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
8241  **/
8242 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8243 {
8244         struct i40e_veb *veb;
8245         struct i40e_pf *pf = vsi->back;
8246
8247         /* Uplink is not a bridge so default to VEB */
8248         if (vsi->veb_idx == I40E_NO_VEB)
8249                 return 1;
8250
8251         veb = pf->veb[vsi->veb_idx];
8252         /* Uplink is a bridge in VEPA mode */
8253         if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8254                 return 0;
8255
8256         /* Uplink is a bridge in VEB mode */
8257         return 1;
8258 }
8259
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.  For the main VSI (which the
 * firmware created at init time) the current parameters are fetched and
 * updated instead of issuing an add.  On success the VSI's seid/id/info
 * are filled in, pre-existing MAC filters are flagged for sync, and BW
 * info is refreshed.
 *
 * Returns 0 on success, -ENOENT/-ENODEV on failure.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
					 enabled_tc, ret,
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		/* Flow Director sideband VSI, owned by the PF */
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			/* VEB uplink requires allowing loopback on the VSI */
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* VF VSIs accept all VLANs (port VLAN mode "all") */
		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			/* enable MAC/VLAN anti-spoof checking for this VF */
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	/* main VSI already exists in FW; everything else is added here */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, aq_err=%d\n",
				 vsi->back->hw.aq.asq_last_status);
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		/* a locally-administered address on the main VSI must
		 * replace the FW default perfect-match filter and be
		 * written as the LAA/WOL address
		 */
		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			struct i40e_aqc_remove_macvlan_element_data element;

			memset(&element, 0, sizeof(element));
			ether_addr_copy(element.mac_addr, f->macaddr);
			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
						     &element, 1, NULL);
			if (ret) {
				/* some older FW has a different default */
				element.flags |=
					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				i40e_aq_remove_macvlan(hw, vsi->seid,
						       &element, 1, NULL);
			}

			i40e_aq_mac_address_write(hw,
						  I40E_AQC_WRITE_TYPE_LAA_WOL,
						  f->macaddr, NULL);
		}
	}
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
8480
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Unregisters/closes the netdev, removes all MAC filters, deletes the
 * VSI from the switch and frees its SW resources.  Finally, if this was
 * the last consumer of its uplink VEB (other than the VEB's own
 * controlling VSI), the VEB is released too.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* remember the uplink for the VEB cleanup pass after the VSI
	 * structure is cleared below
	 */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* drop all MAC filters, then push the removals to the HW */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	/* uplink_seid == 0 marks an orphan VEB (see comment above) */
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
8565
8566 /**
8567  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8568  * @vsi: ptr to the VSI
8569  *
8570  * This should only be called after i40e_vsi_mem_alloc() which allocates the
8571  * corresponding SW VSI structure and initializes num_queue_pairs for the
8572  * newly allocated VSI.
8573  *
8574  * Returns 0 on success or negative on failure
8575  **/
8576 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8577 {
8578         int ret = -ENOENT;
8579         struct i40e_pf *pf = vsi->back;
8580
8581         if (vsi->q_vectors[0]) {
8582                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8583                          vsi->seid);
8584                 return -EEXIST;
8585         }
8586
8587         if (vsi->base_vector) {
8588                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8589                          vsi->seid, vsi->base_vector);
8590                 return -EEXIST;
8591         }
8592
8593         ret = i40e_vsi_alloc_q_vectors(vsi);
8594         if (ret) {
8595                 dev_info(&pf->pdev->dev,
8596                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8597                          vsi->num_q_vectors, vsi->seid, ret);
8598                 vsi->num_q_vectors = 0;
8599                 goto vector_setup_out;
8600         }
8601
8602         if (vsi->num_q_vectors)
8603                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8604                                                  vsi->num_q_vectors, vsi->idx);
8605         if (vsi->base_vector < 0) {
8606                 dev_info(&pf->pdev->dev,
8607                          "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8608                          vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8609                 i40e_vsi_free_q_vectors(vsi);
8610                 ret = -ENOENT;
8611                 goto vector_setup_out;
8612         }
8613
8614 vector_setup_out:
8615         return ret;
8616 }
8617
8618 /**
8619  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8620  * @vsi: pointer to the vsi.
8621  *
8622  * This re-allocates a vsi's queue resources.
8623  *
8624  * Returns pointer to the successfully allocated and configured VSI sw struct
8625  * on success, otherwise returns NULL on failure.
8626  **/
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        u8 enabled_tc;
        int ret;

        /* Return the current queue-pair lump and drop ring memory before
         * re-sizing the VSI.
         */
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_vsi_clear_rings(vsi);

        /* false: keep the VSI struct itself; only the queue/vector arrays
         * are freed and rebuilt to the newly computed ring counts.
         */
        i40e_vsi_free_arrays(vsi, false);
        i40e_set_num_rings_in_vsi(vsi);
        ret = i40e_vsi_alloc_arrays(vsi, false);
        if (ret)
                goto err_vsi;

        /* grab a fresh lump of queue pairs for the resized VSI */
        ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
        if (ret < 0) {
                dev_info(&pf->pdev->dev,
                         "failed to get tracking for %d queues for VSI %d err=%d\n",
                         vsi->alloc_queue_pairs, vsi->seid, ret);
                goto err_vsi;
        }
        vsi->base_queue = ret;

        /* Update the FW view of the VSI. Force a reset of TC and queue
         * layout configurations.
         */
        enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
        pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
        pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
        i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

        /* assign it some queues */
        ret = i40e_alloc_rings(vsi);
        if (ret)
                goto err_rings;

        /* map all of the rings to the q_vectors */
        i40e_vsi_map_rings_to_vectors(vsi);
        return vsi;

err_rings:
        /* unwind in reverse order: vectors, netdev, then the HW element */
        i40e_vsi_free_q_vectors(vsi);
        if (vsi->netdev_registered) {
                vsi->netdev_registered = false;
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
        }
        i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
        i40e_vsi_clear(vsi);
        return NULL;
}
8681
8682 /**
8683  * i40e_vsi_setup - Set up a VSI by a given type
8684  * @pf: board private structure
8685  * @type: VSI type
8686  * @uplink_seid: the switch element to link to
8687  * @param1: usage depends upon VSI type. For VF types, indicates VF id
8688  *
8689  * This allocates the sw VSI structure and its queue resources, then add a VSI
8690  * to the identified VEB.
8691  *
8692  * Returns pointer to the successfully allocated and configure VSI sw struct on
8693  * success, otherwise returns NULL on failure.
8694  **/
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                u16 uplink_seid, u32 param1)
{
        struct i40e_vsi *vsi = NULL;
        struct i40e_veb *veb = NULL;
        int ret, i;
        int v_idx;

        /* The requested uplink_seid must be either
         *     - the PF's port seid
         *              no VEB is needed because this is the PF
         *              or this is a Flow Director special case VSI
         *     - seid of an existing VEB
         *     - seid of a VSI that owns an existing VEB
         *     - seid of a VSI that doesn't own a VEB
         *              a new VEB is created and the VSI becomes the owner
         *     - seid of the PF VSI, which is what creates the first VEB
         *              this is a special case of the previous
         *
         * Find which uplink_seid we were given and create a new VEB if needed
         */
        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
                        veb = pf->veb[i];
                        break;
                }
        }

        if (!veb && uplink_seid != pf->mac_seid) {

                /* uplink is not a VEB or the port: it must be a VSI seid */
                for (i = 0; i < pf->num_alloc_vsi; i++) {
                        if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
                                vsi = pf->vsi[i];
                                break;
                        }
                }
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
                                 uplink_seid);
                        return NULL;
                }

                /* create a VEB under the uplink VSI, unless that VSI already
                 * owns one (in which case the loop below will find it)
                 */
                if (vsi->uplink_seid == pf->mac_seid)
                        veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
                                             vsi->tc_config.enabled_tc);
                else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
                        veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
                                             vsi->tc_config.enabled_tc);
                if (veb) {
                        /* only the LAN VSI is allowed to seed a new VEB here */
                        if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
                                dev_info(&vsi->back->pdev->dev,
                                         "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
                                         __func__);
                                return NULL;
                        }
                        i40e_config_bridge_mode(veb);
                }
                for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
                        if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
                                veb = pf->veb[i];
                }
                if (!veb) {
                        dev_info(&pf->pdev->dev, "couldn't add VEB\n");
                        return NULL;
                }

                /* the uplink VSI now owns the VEB; hang the new VSI off it */
                vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
                uplink_seid = veb->seid;
        }

        /* get vsi sw struct */
        v_idx = i40e_vsi_mem_alloc(pf, type);
        if (v_idx < 0)
                goto err_alloc;
        vsi = pf->vsi[v_idx];
        if (!vsi)
                goto err_alloc;
        vsi->type = type;
        vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

        if (type == I40E_VSI_MAIN)
                pf->lan_vsi = v_idx;
        else if (type == I40E_VSI_SRIOV)
                vsi->vf_id = param1;
        /* assign it some queues */
        ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
                                vsi->idx);
        if (ret < 0) {
                dev_info(&pf->pdev->dev,
                         "failed to get tracking for %d queues for VSI %d err=%d\n",
                         vsi->alloc_queue_pairs, vsi->seid, ret);
                goto err_vsi;
        }
        vsi->base_queue = ret;

        /* get a VSI from the hardware */
        vsi->uplink_seid = uplink_seid;
        ret = i40e_add_vsi(vsi);
        if (ret)
                goto err_vsi;

        switch (vsi->type) {
        /* setup the netdev if needed */
        case I40E_VSI_MAIN:
        case I40E_VSI_VMDQ2:
        case I40E_VSI_FCOE:
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
                ret = register_netdev(vsi->netdev);
                if (ret)
                        goto err_netdev;
                vsi->netdev_registered = true;
                netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
                /* Setup DCB netlink interface */
                i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
                /* fall through */

        case I40E_VSI_FDIR:
                /* set up vectors and rings if needed */
                ret = i40e_vsi_setup_vectors(vsi);
                if (ret)
                        goto err_msix;

                ret = i40e_alloc_rings(vsi);
                if (ret)
                        goto err_rings;

                /* map all of the rings to the q_vectors */
                i40e_vsi_map_rings_to_vectors(vsi);

                i40e_vsi_reset_stats(vsi);
                break;

        default:
                /* no netdev or rings for the other VSI types */
                break;
        }

        return vsi;

err_rings:
        i40e_vsi_free_q_vectors(vsi);
err_msix:
        if (vsi->netdev_registered) {
                vsi->netdev_registered = false;
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
        }
err_netdev:
        /* remove the element from the HW switch before freeing the SW VSI */
        i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
        i40e_vsi_clear(vsi);
err_alloc:
        return NULL;
}
8854
8855 /**
8856  * i40e_veb_get_bw_info - Query VEB BW information
8857  * @veb: the veb to query
8858  *
8859  * Query the Tx scheduler BW configuration data for given VEB
8860  **/
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB and cache the
 * per-TC credits/limits in the SW VEB struct.
 *
 * Returns 0 on success, the AQ error code otherwise.
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
        struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
        struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 tc_bw_max;
        int ret = 0;
        int i;

        ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
                                                  &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "query veb bw config failed, aq_err=%d\n",
                         hw->aq.asq_last_status);
                goto out;
        }

        ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
                                                   &ets_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "query veb bw ets config failed, aq_err=%d\n",
                         hw->aq.asq_last_status);
                goto out;
        }

        veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
        veb->bw_max_quanta = ets_data.tc_bw_max;
        veb->is_abs_credits = bw_data.absolute_credits_enable;
        veb->enabled_tc = ets_data.tc_valid_bits;
        /* tc_bw_max packs 4 bits per TC across two LE 16-bit words */
        tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
                    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
                veb->bw_tc_limit_credits[i] =
                                        le16_to_cpu(bw_data.tc_bw_limits[i]);
                veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
        }

out:
        return ret;
}
8905
8906 /**
8907  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8908  * @pf: board private structure
8909  *
8910  * On error: returns error code (negative)
8911  * On success: returns vsi index in PF (positive)
8912  **/
8913 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8914 {
8915         int ret = -ENOENT;
8916         struct i40e_veb *veb;
8917         int i;
8918
8919         /* Need to protect the allocation of switch elements at the PF level */
8920         mutex_lock(&pf->switch_mutex);
8921
8922         /* VEB list may be fragmented if VEB creation/destruction has
8923          * been happening.  We can afford to do a quick scan to look
8924          * for any free slots in the list.
8925          *
8926          * find next empty veb slot, looping back around if necessary
8927          */
8928         i = 0;
8929         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8930                 i++;
8931         if (i >= I40E_MAX_VEB) {
8932                 ret = -ENOMEM;
8933                 goto err_alloc_veb;  /* out of VEB slots! */
8934         }
8935
8936         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8937         if (!veb) {
8938                 ret = -ENOMEM;
8939                 goto err_alloc_veb;
8940         }
8941         veb->pf = pf;
8942         veb->idx = i;
8943         veb->enabled_tc = 1;
8944
8945         pf->veb[i] = veb;
8946         ret = i;
8947 err_alloc_veb:
8948         mutex_unlock(&pf->switch_mutex);
8949         return ret;
8950 }
8951
8952 /**
8953  * i40e_switch_branch_release - Delete a branch of the switch tree
8954  * @branch: where to start deleting
8955  *
8956  * This uses recursion to find the tips of the branch to be
8957  * removed, deleting until we get back to and can delete this VEB.
8958  **/
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
        struct i40e_pf *pf = branch->pf;
        /* cache seid/idx locally: *branch may be freed before we are done */
        u16 branch_seid = branch->seid;
        u16 veb_idx = branch->idx;
        int i;

        /* release any VEBs on this VEB - RECURSION */
        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (!pf->veb[i])
                        continue;
                if (pf->veb[i]->uplink_seid == branch->seid)
                        i40e_switch_branch_release(pf->veb[i]);
        }

        /* Release the VSIs on this VEB, but not the owner VSI.
         *
         * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
         *       the VEB itself, so don't use (*branch) after this loop.
         */
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                if (!pf->vsi[i])
                        continue;
                if (pf->vsi[i]->uplink_seid == branch_seid &&
                   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
                        i40e_vsi_release(pf->vsi[i]);
                }
        }

        /* There's one corner case where the VEB might not have been
         * removed, so double check it here and remove it if needed.
         * This case happens if the veb was created from the debugfs
         * commands and no VSIs were added to it.
         */
        if (pf->veb[veb_idx])
                i40e_veb_release(pf->veb[veb_idx]);
}
8996
8997 /**
8998  * i40e_veb_clear - remove veb struct
8999  * @veb: the veb to remove
9000  **/
9001 static void i40e_veb_clear(struct i40e_veb *veb)
9002 {
9003         if (!veb)
9004                 return;
9005
9006         if (veb->pf) {
9007                 struct i40e_pf *pf = veb->pf;
9008
9009                 mutex_lock(&pf->switch_mutex);
9010                 if (pf->veb[veb->idx] == veb)
9011                         pf->veb[veb->idx] = NULL;
9012                 mutex_unlock(&pf->switch_mutex);
9013         }
9014
9015         kfree(veb);
9016 }
9017
9018 /**
9019  * i40e_veb_release - Delete a VEB and free its resources
9020  * @veb: the VEB being removed
9021  **/
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * Only legal when exactly one VSI (the owner) remains on the VEB; that VSI
 * is re-parented to the VEB's uplink before the VEB is deleted from the HW
 * switch and its SW struct freed.
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
        struct i40e_vsi *vsi = NULL;
        struct i40e_pf *pf;
        int i, n = 0;

        pf = veb->pf;

        /* find the remaining VSI and check for extras */
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
                        n++;
                        vsi = pf->vsi[i];
                }
        }
        if (n != 1) {
                dev_info(&pf->pdev->dev,
                         "can't remove VEB %d with %d VSIs left\n",
                         veb->seid, n);
                return;
        }

        /* move the remaining VSI to uplink veb */
        vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
        if (veb->uplink_seid) {
                vsi->uplink_seid = veb->uplink_seid;
                if (veb->uplink_seid == pf->mac_seid)
                        vsi->veb_idx = I40E_NO_VEB;
                else
                        vsi->veb_idx = veb->veb_idx;
        } else {
                /* floating VEB */
                vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
                vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
        }

        /* delete the HW element, then drop the SW struct */
        i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
        i40e_veb_clear(veb);
}
9061
9062 /**
9063  * i40e_add_veb - create the VEB in the switch
9064  * @veb: the VEB to be instantiated
9065  * @vsi: the controlling VSI
9066  **/
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Issues the AQ commands to create the VEB, fetch its statistics index and
 * BW configuration, then marks @vsi as the VEB's owner.
 *
 * Returns 0 on success, -EPERM/-ENOENT on AQ failures.
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
        bool is_default = false;
        bool is_cloud = false;
        int ret;

        /* get a VEB from the hardware */
        ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
                              is_cloud, &veb->seid, NULL);
        if (ret) {
                dev_info(&veb->pf->pdev->dev,
                         "couldn't add VEB, err %d, aq_err %d\n",
                         ret, veb->pf->hw.aq.asq_last_status);
                return -EPERM;
        }

        /* get statistics counter */
        ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
                                         &veb->stats_idx, NULL, NULL, NULL);
        if (ret) {
                dev_info(&veb->pf->pdev->dev,
                         "couldn't get VEB statistics idx, err %d, aq_err %d\n",
                         ret, veb->pf->hw.aq.asq_last_status);
                return -EPERM;
        }
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
                dev_info(&veb->pf->pdev->dev,
                         "couldn't get VEB bw info, err %d, aq_err %d\n",
                         ret, veb->pf->hw.aq.asq_last_status);
                /* roll back the HW element created above */
                i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
                return -ENOENT;
        }

        /* re-parent the controlling VSI onto the new VEB */
        vsi->uplink_seid = veb->seid;
        vsi->veb_idx = veb->idx;
        vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

        return 0;
}
9108
9109 /**
9110  * i40e_veb_setup - Set up a VEB
9111  * @pf: board private structure
9112  * @flags: VEB setup flags
9113  * @uplink_seid: the switch element to link to
9114  * @vsi_seid: the initial VSI seid
9115  * @enabled_tc: Enabled TC bit-map
9116  *
9117  * This allocates the sw VEB structure and links it into the switch
9118  * It is possible and legal for this to be a duplicate of an already
9119  * existing VEB.  It is also possible for both uplink and vsi seids
9120  * to be zero, in order to create a floating VEB.
9121  *
9122  * Returns pointer to the successfully allocated VEB sw struct on
9123  * success, otherwise returns NULL on failure.
9124  **/
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
                                u16 uplink_seid, u16 vsi_seid,
                                u8 enabled_tc)
{
        struct i40e_veb *veb, *uplink_veb = NULL;
        int vsi_idx, veb_idx;
        int ret;

        /* if one seid is 0, the other must be 0 to create a floating relay */
        if ((uplink_seid == 0 || vsi_seid == 0) &&
            (uplink_seid + vsi_seid != 0)) {
                dev_info(&pf->pdev->dev,
                         "one, not both seid's are 0: uplink=%d vsi=%d\n",
                         uplink_seid, vsi_seid);
                return NULL;
        }

        /* make sure there is such a vsi and uplink */
        for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
                if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
                        break;
        if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
                dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
                         vsi_seid);
                return NULL;
        }

        /* a non-port uplink must be an existing VEB */
        if (uplink_seid && uplink_seid != pf->mac_seid) {
                for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
                        if (pf->veb[veb_idx] &&
                            pf->veb[veb_idx]->seid == uplink_seid) {
                                uplink_veb = pf->veb[veb_idx];
                                break;
                        }
                }
                if (!uplink_veb) {
                        dev_info(&pf->pdev->dev,
                                 "uplink seid %d not found\n", uplink_seid);
                        return NULL;
                }
        }

        /* get veb sw struct */
        veb_idx = i40e_veb_mem_alloc(pf);
        if (veb_idx < 0)
                goto err_alloc;
        veb = pf->veb[veb_idx];
        veb->flags = flags;
        veb->uplink_seid = uplink_seid;
        veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
        /* default to TC0 if no TC bitmap was supplied */
        veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

        /* create the VEB in the switch */
        ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
        if (ret)
                goto err_veb;
        if (vsi_idx == pf->lan_vsi)
                pf->lan_veb = veb->idx;

        return veb;

err_veb:
        i40e_veb_clear(veb);
err_alloc:
        return NULL;
}
9191
9192 /**
9193  * i40e_setup_pf_switch_element - set PF vars based on switch type
9194  * @pf: board private structure
9195  * @ele: element we are building info from
9196  * @num_reported: total number of elements
9197  * @printconfig: should we print the contents
9198  *
9199  * helper function to assist in extracting a few useful SEID values.
9200  **/
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
                                struct i40e_aqc_switch_config_element_resp *ele,
                                u16 num_reported, bool printconfig)
{
        u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
        u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
        u8 element_type = ele->element_type;
        u16 seid = le16_to_cpu(ele->seid);

        if (printconfig)
                dev_info(&pf->pdev->dev,
                         "type=%d seid=%d uplink=%d downlink=%d\n",
                         element_type, seid, uplink_seid, downlink_seid);

        switch (element_type) {
        case I40E_SWITCH_ELEMENT_TYPE_MAC:
                pf->mac_seid = seid;
                break;
        case I40E_SWITCH_ELEMENT_TYPE_VEB:
                /* Main VEB? */
                if (uplink_seid != pf->mac_seid)
                        break;
                if (pf->lan_veb == I40E_NO_VEB) {
                        int v;

                        /* find existing or else empty VEB */
                        for (v = 0; v < I40E_MAX_VEB; v++) {
                                if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
                                        pf->lan_veb = v;
                                        break;
                                }
                        }
                        if (pf->lan_veb == I40E_NO_VEB) {
                                v = i40e_veb_mem_alloc(pf);
                                if (v < 0)
                                        break;
                                pf->lan_veb = v;
                        }
                }

                /* record the reported seids in the SW VEB struct */
                pf->veb[pf->lan_veb]->seid = seid;
                pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
                pf->veb[pf->lan_veb]->pf = pf;
                pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
                break;
        case I40E_SWITCH_ELEMENT_TYPE_VSI:
                if (num_reported != 1)
                        break;
                /* This is immediately after a reset so we can assume this is
                 * the PF's VSI
                 */
                pf->mac_seid = uplink_seid;
                pf->pf_seid = downlink_seid;
                pf->main_vsi_seid = seid;
                if (printconfig)
                        dev_info(&pf->pdev->dev,
                                 "pf_seid=%d main_vsi_seid=%d\n",
                                 pf->pf_seid, pf->main_vsi_seid);
                break;
        case I40E_SWITCH_ELEMENT_TYPE_PF:
        case I40E_SWITCH_ELEMENT_TYPE_VF:
        case I40E_SWITCH_ELEMENT_TYPE_EMP:
        case I40E_SWITCH_ELEMENT_TYPE_BMC:
        case I40E_SWITCH_ELEMENT_TYPE_PE:
        case I40E_SWITCH_ELEMENT_TYPE_PA:
                /* ignore these for now */
                break;
        default:
                dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
                         element_type, seid);
                break;
        }
}
9274
9275 /**
9276  * i40e_fetch_switch_configuration - Get switch config from firmware
9277  * @pf: board private structure
9278  * @printconfig: should we print the contents
9279  *
9280  * Get the current switch configuration from the device and
9281  * extract a few useful SEID values.
9282  **/
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 *
 * Returns 0 on success, -ENOMEM/-ENOENT on failure.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
        struct i40e_aqc_get_switch_config_resp *sw_config;
        u16 next_seid = 0;
        int ret = 0;
        u8 *aq_buf;
        int i;

        aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
        if (!aq_buf)
                return -ENOMEM;

        sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
        /* the AQ reply is paged; next_seid is the continuation cookie and
         * comes back as 0 once the last page has been delivered
         */
        do {
                u16 num_reported, num_total;

                ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
                                                I40E_AQ_LARGE_BUF,
                                                &next_seid, NULL);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "get switch config failed %d aq_err=%x\n",
                                 ret, pf->hw.aq.asq_last_status);
                        kfree(aq_buf);
                        return -ENOENT;
                }

                num_reported = le16_to_cpu(sw_config->header.num_reported);
                num_total = le16_to_cpu(sw_config->header.num_total);

                if (printconfig)
                        dev_info(&pf->pdev->dev,
                                 "header: %d reported %d total\n",
                                 num_reported, num_total);

                for (i = 0; i < num_reported; i++) {
                        struct i40e_aqc_switch_config_element_resp *ele =
                                &sw_config->element[i];

                        i40e_setup_pf_switch_element(pf, ele, num_reported,
                                                     printconfig);
                }
        } while (next_seid != 0);

        kfree(aq_buf);
        return ret;
}
9330
9331 /**
9332  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9333  * @pf: board private structure
9334  * @reinit: if the Main VSI needs to re-initialized.
9335  *
9336  * Returns 0 on success, negative value on failure
9337  **/
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
        int ret;

        /* find out what's out there already */
        ret = i40e_fetch_switch_configuration(pf, false);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't fetch switch config, err %d, aq_err %d\n",
                         ret, pf->hw.aq.asq_last_status);
                return ret;
        }
        i40e_pf_reset_stats(pf);

        /* first time setup */
        if (pf->lan_vsi == I40E_NO_VSI || reinit) {
                struct i40e_vsi *vsi = NULL;
                u16 uplink_seid;

                /* Set up the PF VSI associated with the PF's main VSI
                 * that is already in the HW switch
                 */
                if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
                        uplink_seid = pf->veb[pf->lan_veb]->seid;
                else
                        uplink_seid = pf->mac_seid;
                if (pf->lan_vsi == I40E_NO_VSI)
                        vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
                else if (reinit)
                        vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
                        i40e_fdir_teardown(pf);
                        return -EAGAIN;
                }
        } else {
                /* force a reset of TC and queue layout configurations */
                u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
                pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
                pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
                i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
        }
        i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

        i40e_fdir_sb_setup(pf);

        /* Setup static PF queue filter control settings */
        ret = i40e_setup_pf_filter_control(pf);
        if (ret) {
                dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
                         ret);
                /* Failure here should not stop continuing other steps */
        }

        /* enable RSS in the HW, even for only one queue, as the stack can use
         * the hash
         */
        if ((pf->flags & I40E_FLAG_RSS_ENABLED))
                i40e_config_rss(pf);

        /* fill in link information and enable LSE reporting */
        i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
        i40e_link_event(pf);

        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);

        i40e_ptp_init(pf);

        return ret;
}
9410
9411 /**
9412  * i40e_determine_queue_usage - Work out queue distribution
9413  * @pf: board private structure
9414  **/
9415 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9416 {
9417         int queues_left;
9418
9419         pf->num_lan_qps = 0;
9420 #ifdef I40E_FCOE
9421         pf->num_fcoe_qps = 0;
9422 #endif
9423
9424         /* Find the max queues to be put into basic use.  We'll always be
9425          * using TC0, whether or not DCB is running, and TC0 will get the
9426          * big RSS set.
9427          */
9428         queues_left = pf->hw.func_caps.num_tx_qp;
9429
9430         if ((queues_left == 1) ||
9431             !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9432                 /* one qp for PF, no queues for anything else */
9433                 queues_left = 0;
9434                 pf->rss_size = pf->num_lan_qps = 1;
9435
9436                 /* make sure all the fancies are disabled */
9437                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
9438 #ifdef I40E_FCOE
9439                                I40E_FLAG_FCOE_ENABLED   |
9440 #endif
9441                                I40E_FLAG_FD_SB_ENABLED  |
9442                                I40E_FLAG_FD_ATR_ENABLED |
9443                                I40E_FLAG_DCB_CAPABLE    |
9444                                I40E_FLAG_SRIOV_ENABLED  |
9445                                I40E_FLAG_VMDQ_ENABLED);
9446         } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9447                                   I40E_FLAG_FD_SB_ENABLED |
9448                                   I40E_FLAG_FD_ATR_ENABLED |
9449                                   I40E_FLAG_DCB_CAPABLE))) {
9450                 /* one qp for PF */
9451                 pf->rss_size = pf->num_lan_qps = 1;
9452                 queues_left -= pf->num_lan_qps;
9453
9454                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
9455 #ifdef I40E_FCOE
9456                                I40E_FLAG_FCOE_ENABLED   |
9457 #endif
9458                                I40E_FLAG_FD_SB_ENABLED  |
9459                                I40E_FLAG_FD_ATR_ENABLED |
9460                                I40E_FLAG_DCB_ENABLED    |
9461                                I40E_FLAG_VMDQ_ENABLED);
9462         } else {
9463                 /* Not enough queues for all TCs */
9464                 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9465                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9466                         pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9467                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9468                 }
9469                 pf->num_lan_qps = max_t(int, pf->rss_size_max,
9470                                         num_online_cpus());
9471                 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9472                                         pf->hw.func_caps.num_tx_qp);
9473
9474                 queues_left -= pf->num_lan_qps;
9475         }
9476
9477 #ifdef I40E_FCOE
9478         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9479                 if (I40E_DEFAULT_FCOE <= queues_left) {
9480                         pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9481                 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9482                         pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9483                 } else {
9484                         pf->num_fcoe_qps = 0;
9485                         pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9486                         dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9487                 }
9488
9489                 queues_left -= pf->num_fcoe_qps;
9490         }
9491
9492 #endif
9493         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9494                 if (queues_left > 1) {
9495                         queues_left -= 1; /* save 1 queue for FD */
9496                 } else {
9497                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9498                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9499                 }
9500         }
9501
9502         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9503             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9504                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9505                                         (queues_left / pf->num_vf_qps));
9506                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9507         }
9508
9509         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9510             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9511                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9512                                           (queues_left / pf->num_vmdq_qps));
9513                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9514         }
9515
9516         pf->queues_left = queues_left;
9517 #ifdef I40E_FCOE
9518         dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9519 #endif
9520 }
9521
9522 /**
9523  * i40e_setup_pf_filter_control - Setup PF static filter control
9524  * @pf: PF to be setup
9525  *
9526  * i40e_setup_pf_filter_control sets up a PF's initial filter control
9527  * settings. If PE/FCoE are enabled then it will also set the per PF
9528  * based filter sizes required for them. It also enables Flow director,
9529  * ethertype and macvlan type filter settings for the pf.
9530  *
9531  * Returns 0 on success, negative on failure
9532  **/
9533 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9534 {
9535         struct i40e_filter_control_settings *settings = &pf->filter_settings;
9536
9537         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9538
9539         /* Flow Director is enabled */
9540         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9541                 settings->enable_fdir = true;
9542
9543         /* Ethtype and MACVLAN filters enabled for PF */
9544         settings->enable_ethtype = true;
9545         settings->enable_macvlan = true;
9546
9547         if (i40e_set_filter_control(&pf->hw, settings))
9548                 return -ENOENT;
9549
9550         return 0;
9551 }
9552
9553 #define INFO_STRING_LEN 255
9554 static void i40e_print_features(struct i40e_pf *pf)
9555 {
9556         struct i40e_hw *hw = &pf->hw;
9557         char *buf, *string;
9558
9559         string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9560         if (!string) {
9561                 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9562                 return;
9563         }
9564
9565         buf = string;
9566
9567         buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9568 #ifdef CONFIG_PCI_IOV
9569         buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9570 #endif
9571         buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9572                        pf->hw.func_caps.num_vsis,
9573                        pf->vsi[pf->lan_vsi]->num_queue_pairs,
9574                        pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9575
9576         if (pf->flags & I40E_FLAG_RSS_ENABLED)
9577                 buf += sprintf(buf, "RSS ");
9578         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9579                 buf += sprintf(buf, "FD_ATR ");
9580         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9581                 buf += sprintf(buf, "FD_SB ");
9582                 buf += sprintf(buf, "NTUPLE ");
9583         }
9584         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9585                 buf += sprintf(buf, "DCB ");
9586         if (pf->flags & I40E_FLAG_PTP)
9587                 buf += sprintf(buf, "PTP ");
9588 #ifdef I40E_FCOE
9589         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9590                 buf += sprintf(buf, "FCOE ");
9591 #endif
9592
9593         BUG_ON(buf > (string + INFO_STRING_LEN));
9594         dev_info(&pf->pdev->dev, "%s\n", string);
9595         kfree(string);
9596 }
9597
9598 /**
9599  * i40e_probe - Device initialization routine
9600  * @pdev: PCI device information struct
9601  * @ent: entry in i40e_pci_tbl
9602  *
9603  * i40e_probe initializes a PF identified by a pci_dev structure.
9604  * The OS initialization, configuring of the PF private structure,
9605  * and a hardware reset occur.
9606  *
9607  * Returns 0 on success, negative on failure
9608  **/
9609 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9610 {
9611         struct i40e_aq_get_phy_abilities_resp abilities;
9612         unsigned long ioremap_len;
9613         struct i40e_pf *pf;
9614         struct i40e_hw *hw;
9615         static u16 pfs_found;
9616         u16 link_status;
9617         int err = 0;
9618         u32 len;
9619         u32 i;
9620
9621         err = pci_enable_device_mem(pdev);
9622         if (err)
9623                 return err;
9624
9625         /* set up for high or low dma */
9626         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9627         if (err) {
9628                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9629                 if (err) {
9630                         dev_err(&pdev->dev,
9631                                 "DMA configuration failed: 0x%x\n", err);
9632                         goto err_dma;
9633                 }
9634         }
9635
9636         /* set up pci connections */
9637         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9638                                            IORESOURCE_MEM), i40e_driver_name);
9639         if (err) {
9640                 dev_info(&pdev->dev,
9641                          "pci_request_selected_regions failed %d\n", err);
9642                 goto err_pci_reg;
9643         }
9644
9645         pci_enable_pcie_error_reporting(pdev);
9646         pci_set_master(pdev);
9647
9648         /* Now that we have a PCI connection, we need to do the
9649          * low level device setup.  This is primarily setting up
9650          * the Admin Queue structures and then querying for the
9651          * device's current profile information.
9652          */
9653         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9654         if (!pf) {
9655                 err = -ENOMEM;
9656                 goto err_pf_alloc;
9657         }
9658         pf->next_vsi = 0;
9659         pf->pdev = pdev;
9660         set_bit(__I40E_DOWN, &pf->state);
9661
9662         hw = &pf->hw;
9663         hw->back = pf;
9664
9665         ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
9666                             I40E_MAX_CSR_SPACE);
9667
9668         hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
9669         if (!hw->hw_addr) {
9670                 err = -EIO;
9671                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9672                          (unsigned int)pci_resource_start(pdev, 0),
9673                          (unsigned int)pci_resource_len(pdev, 0), err);
9674                 goto err_ioremap;
9675         }
9676         hw->vendor_id = pdev->vendor;
9677         hw->device_id = pdev->device;
9678         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9679         hw->subsystem_vendor_id = pdev->subsystem_vendor;
9680         hw->subsystem_device_id = pdev->subsystem_device;
9681         hw->bus.device = PCI_SLOT(pdev->devfn);
9682         hw->bus.func = PCI_FUNC(pdev->devfn);
9683         pf->instance = pfs_found;
9684
9685         if (debug != -1) {
9686                 pf->msg_enable = pf->hw.debug_mask;
9687                 pf->msg_enable = debug;
9688         }
9689
9690         /* do a special CORER for clearing PXE mode once at init */
9691         if (hw->revision_id == 0 &&
9692             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9693                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9694                 i40e_flush(hw);
9695                 msleep(200);
9696                 pf->corer_count++;
9697
9698                 i40e_clear_pxe_mode(hw);
9699         }
9700
9701         /* Reset here to make sure all is clean and to define PF 'n' */
9702         i40e_clear_hw(hw);
9703         err = i40e_pf_reset(hw);
9704         if (err) {
9705                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9706                 goto err_pf_reset;
9707         }
9708         pf->pfr_count++;
9709
9710         hw->aq.num_arq_entries = I40E_AQ_LEN;
9711         hw->aq.num_asq_entries = I40E_AQ_LEN;
9712         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9713         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9714         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9715
9716         snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9717                  "%s-%s:misc",
9718                  dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9719
9720         err = i40e_init_shared_code(hw);
9721         if (err) {
9722                 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9723                 goto err_pf_reset;
9724         }
9725
9726         /* set up a default setting for link flow control */
9727         pf->hw.fc.requested_mode = I40E_FC_NONE;
9728
9729         err = i40e_init_adminq(hw);
9730         dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9731         if (err) {
9732                 dev_info(&pdev->dev,
9733                          "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9734                 goto err_pf_reset;
9735         }
9736
9737         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9738             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9739                 dev_info(&pdev->dev,
9740                          "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9741         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9742                  hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9743                 dev_info(&pdev->dev,
9744                          "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9745
9746         i40e_verify_eeprom(pf);
9747
9748         /* Rev 0 hardware was never productized */
9749         if (hw->revision_id < 1)
9750                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9751
9752         i40e_clear_pxe_mode(hw);
9753         err = i40e_get_capabilities(pf);
9754         if (err)
9755                 goto err_adminq_setup;
9756
9757         err = i40e_sw_init(pf);
9758         if (err) {
9759                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9760                 goto err_sw_init;
9761         }
9762
9763         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9764                                 hw->func_caps.num_rx_qp,
9765                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9766         if (err) {
9767                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9768                 goto err_init_lan_hmc;
9769         }
9770
9771         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9772         if (err) {
9773                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9774                 err = -ENOENT;
9775                 goto err_configure_lan_hmc;
9776         }
9777
9778         /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9779          * Ignore error return codes because if it was already disabled via
9780          * hardware settings this will fail
9781          */
9782         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9783             (pf->hw.aq.fw_maj_ver < 4)) {
9784                 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9785                 i40e_aq_stop_lldp(hw, true, NULL);
9786         }
9787
9788         i40e_get_mac_addr(hw, hw->mac.addr);
9789         if (!is_valid_ether_addr(hw->mac.addr)) {
9790                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9791                 err = -EIO;
9792                 goto err_mac_addr;
9793         }
9794         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9795         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
9796         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9797         if (is_valid_ether_addr(hw->mac.port_addr))
9798                 pf->flags |= I40E_FLAG_PORT_ID_VALID;
9799 #ifdef I40E_FCOE
9800         err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9801         if (err)
9802                 dev_info(&pdev->dev,
9803                          "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9804         if (!is_valid_ether_addr(hw->mac.san_addr)) {
9805                 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9806                          hw->mac.san_addr);
9807                 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9808         }
9809         dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9810 #endif /* I40E_FCOE */
9811
9812         pci_set_drvdata(pdev, pf);
9813         pci_save_state(pdev);
9814 #ifdef CONFIG_I40E_DCB
9815         err = i40e_init_pf_dcb(pf);
9816         if (err) {
9817                 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9818                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9819                 /* Continue without DCB enabled */
9820         }
9821 #endif /* CONFIG_I40E_DCB */
9822
9823         /* set up periodic task facility */
9824         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9825         pf->service_timer_period = HZ;
9826
9827         INIT_WORK(&pf->service_task, i40e_service_task);
9828         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9829         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9830         pf->link_check_timeout = jiffies;
9831
9832         /* WoL defaults to disabled */
9833         pf->wol_en = false;
9834         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9835
9836         /* set up the main switch operations */
9837         i40e_determine_queue_usage(pf);
9838         err = i40e_init_interrupt_scheme(pf);
9839         if (err)
9840                 goto err_switch_setup;
9841
9842         /* The number of VSIs reported by the FW is the minimum guaranteed
9843          * to us; HW supports far more and we share the remaining pool with
9844          * the other PFs. We allocate space for more than the guarantee with
9845          * the understanding that we might not get them all later.
9846          */
9847         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9848                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9849         else
9850                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9851
9852         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9853         len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
9854         pf->vsi = kzalloc(len, GFP_KERNEL);
9855         if (!pf->vsi) {
9856                 err = -ENOMEM;
9857                 goto err_switch_setup;
9858         }
9859
9860         err = i40e_setup_pf_switch(pf, false);
9861         if (err) {
9862                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9863                 goto err_vsis;
9864         }
9865         /* if FDIR VSI was set up, start it now */
9866         for (i = 0; i < pf->num_alloc_vsi; i++) {
9867                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9868                         i40e_vsi_open(pf->vsi[i]);
9869                         break;
9870                 }
9871         }
9872
9873         /* driver is only interested in link up/down and module qualification
9874          * reports from firmware
9875          */
9876         err = i40e_aq_set_phy_int_mask(&pf->hw,
9877                                        I40E_AQ_EVENT_LINK_UPDOWN |
9878                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9879         if (err)
9880                 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9881
9882         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
9883             (pf->hw.aq.fw_maj_ver < 4)) {
9884                 msleep(75);
9885                 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9886                 if (err)
9887                         dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
9888                                  pf->hw.aq.asq_last_status);
9889         }
9890         /* The main driver is (mostly) up and happy. We need to set this state
9891          * before setting up the misc vector or we get a race and the vector
9892          * ends up disabled forever.
9893          */
9894         clear_bit(__I40E_DOWN, &pf->state);
9895
9896         /* In case of MSIX we are going to setup the misc vector right here
9897          * to handle admin queue events etc. In case of legacy and MSI
9898          * the misc functionality and queue processing is combined in
9899          * the same vector and that gets setup at open.
9900          */
9901         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9902                 err = i40e_setup_misc_vector(pf);
9903                 if (err) {
9904                         dev_info(&pdev->dev,
9905                                  "setup of misc vector failed: %d\n", err);
9906                         goto err_vsis;
9907                 }
9908         }
9909
9910 #ifdef CONFIG_PCI_IOV
9911         /* prep for VF support */
9912         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9913             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9914             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9915                 u32 val;
9916
9917                 /* disable link interrupts for VFs */
9918                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9919                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9920                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9921                 i40e_flush(hw);
9922
9923                 if (pci_num_vf(pdev)) {
9924                         dev_info(&pdev->dev,
9925                                  "Active VFs found, allocating resources.\n");
9926                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9927                         if (err)
9928                                 dev_info(&pdev->dev,
9929                                          "Error %d allocating resources for existing VFs\n",
9930                                          err);
9931                 }
9932         }
9933 #endif /* CONFIG_PCI_IOV */
9934
9935         pfs_found++;
9936
9937         i40e_dbg_pf_init(pf);
9938
9939         /* tell the firmware that we're starting */
9940         i40e_send_version(pf);
9941
9942         /* since everything's happy, start the service_task timer */
9943         mod_timer(&pf->service_timer,
9944                   round_jiffies(jiffies + pf->service_timer_period));
9945
9946 #ifdef I40E_FCOE
9947         /* create FCoE interface */
9948         i40e_fcoe_vsi_setup(pf);
9949
9950 #endif
9951         /* Get the negotiated link width and speed from PCI config space */
9952         pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9953
9954         i40e_set_pci_config_data(hw, link_status);
9955
9956         dev_info(&pdev->dev, "PCI-Express: %s %s\n",
9957                 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9958                  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9959                  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9960                  "Unknown"),
9961                 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9962                  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9963                  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9964                  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9965                  "Unknown"));
9966
9967         if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9968             hw->bus.speed < i40e_bus_speed_8000) {
9969                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9970                 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9971         }
9972
9973         /* get the requested speeds from the fw */
9974         err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
9975         if (err)
9976                 dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
9977                          err);
9978         pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
9979
9980         /* print a string summarizing features */
9981         i40e_print_features(pf);
9982
9983         return 0;
9984
9985         /* Unwind what we've done if something failed in the setup */
9986 err_vsis:
9987         set_bit(__I40E_DOWN, &pf->state);
9988         i40e_clear_interrupt_scheme(pf);
9989         kfree(pf->vsi);
9990 err_switch_setup:
9991         i40e_reset_interrupt_capability(pf);
9992         del_timer_sync(&pf->service_timer);
9993 err_mac_addr:
9994 err_configure_lan_hmc:
9995         (void)i40e_shutdown_lan_hmc(hw);
9996 err_init_lan_hmc:
9997         kfree(pf->qp_pile);
9998 err_sw_init:
9999 err_adminq_setup:
10000         (void)i40e_shutdown_adminq(hw);
10001 err_pf_reset:
10002         iounmap(hw->hw_addr);
10003 err_ioremap:
10004         kfree(pf);
10005 err_pf_alloc:
10006         pci_disable_pcie_error_reporting(pdev);
10007         pci_release_selected_regions(pdev,
10008                                      pci_select_bars(pdev, IORESOURCE_MEM));
10009 err_pci_reg:
10010 err_dma:
10011         pci_disable_device(pdev);
10012         return err;
10013 }
10014
10015 /**
10016  * i40e_remove - Device removal routine
10017  * @pdev: PCI device information struct
10018  *
10019  * i40e_remove is called by the PCI subsystem to alert the driver
10020  * that is should release a PCI device.  This could be caused by a
10021  * Hot-Plug event, or because the driver is going to be removed from
10022  * memory.
10023  **/
10024 static void i40e_remove(struct pci_dev *pdev)
10025 {
10026         struct i40e_pf *pf = pci_get_drvdata(pdev);
10027         i40e_status ret_code;
10028         int i;
10029
10030         i40e_dbg_pf_exit(pf);
10031
10032         i40e_ptp_stop(pf);
10033
10034         /* no more scheduling of any task */
10035         set_bit(__I40E_DOWN, &pf->state);
10036         del_timer_sync(&pf->service_timer);
10037         cancel_work_sync(&pf->service_task);
10038         i40e_fdir_teardown(pf);
10039
10040         if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10041                 i40e_free_vfs(pf);
10042                 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10043         }
10044
10045         i40e_fdir_teardown(pf);
10046
10047         /* If there is a switch structure or any orphans, remove them.
10048          * This will leave only the PF's VSI remaining.
10049          */
10050         for (i = 0; i < I40E_MAX_VEB; i++) {
10051                 if (!pf->veb[i])
10052                         continue;
10053
10054                 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10055                     pf->veb[i]->uplink_seid == 0)
10056                         i40e_switch_branch_release(pf->veb[i]);
10057         }
10058
10059         /* Now we can shutdown the PF's VSI, just before we kill
10060          * adminq and hmc.
10061          */
10062         if (pf->vsi[pf->lan_vsi])
10063                 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10064
10065         /* shutdown and destroy the HMC */
10066         if (pf->hw.hmc.hmc_obj) {
10067                 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10068                 if (ret_code)
10069                         dev_warn(&pdev->dev,
10070                                  "Failed to destroy the HMC resources: %d\n",
10071                                  ret_code);
10072         }
10073
10074         /* shutdown the adminq */
10075         ret_code = i40e_shutdown_adminq(&pf->hw);
10076         if (ret_code)
10077                 dev_warn(&pdev->dev,
10078                          "Failed to destroy the Admin Queue resources: %d\n",
10079                          ret_code);
10080
10081         /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10082         i40e_clear_interrupt_scheme(pf);
10083         for (i = 0; i < pf->num_alloc_vsi; i++) {
10084                 if (pf->vsi[i]) {
10085                         i40e_vsi_clear_rings(pf->vsi[i]);
10086                         i40e_vsi_clear(pf->vsi[i]);
10087                         pf->vsi[i] = NULL;
10088                 }
10089         }
10090
10091         for (i = 0; i < I40E_MAX_VEB; i++) {
10092                 kfree(pf->veb[i]);
10093                 pf->veb[i] = NULL;
10094         }
10095
10096         kfree(pf->qp_pile);
10097         kfree(pf->vsi);
10098
10099         iounmap(pf->hw.hw_addr);
10100         kfree(pf);
10101         pci_release_selected_regions(pdev,
10102                                      pci_select_bars(pdev, IORESOURCE_MEM));
10103
10104         pci_disable_pcie_error_reporting(pdev);
10105         pci_disable_device(pdev);
10106 }
10107
10108 /**
10109  * i40e_pci_error_detected - warning that something funky happened in PCI land
10110  * @pdev: PCI device information struct
10111  *
10112  * Called to warn that something happened and the error handling steps
10113  * are in progress.  Allows the driver to quiesce things, be ready for
10114  * remediation.
10115  **/
10116 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10117                                                 enum pci_channel_state error)
10118 {
10119         struct i40e_pf *pf = pci_get_drvdata(pdev);
10120
10121         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10122
10123         /* shutdown all operations */
10124         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10125                 rtnl_lock();
10126                 i40e_prep_for_reset(pf);
10127                 rtnl_unlock();
10128         }
10129
10130         /* Request a slot reset */
10131         return PCI_ERS_RESULT_NEED_RESET;
10132 }
10133
10134 /**
10135  * i40e_pci_error_slot_reset - a PCI slot reset just happened
10136  * @pdev: PCI device information struct
10137  *
10138  * Called to find if the driver can work with the device now that
10139  * the pci slot has been reset.  If a basic connection seems good
10140  * (registers are readable and have sane content) then return a
10141  * happy little PCI_ERS_RESULT_xxx.
10142  **/
10143 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10144 {
10145         struct i40e_pf *pf = pci_get_drvdata(pdev);
10146         pci_ers_result_t result;
10147         int err;
10148         u32 reg;
10149
10150         dev_info(&pdev->dev, "%s\n", __func__);
10151         if (pci_enable_device_mem(pdev)) {
10152                 dev_info(&pdev->dev,
10153                          "Cannot re-enable PCI device after reset.\n");
10154                 result = PCI_ERS_RESULT_DISCONNECT;
10155         } else {
10156                 pci_set_master(pdev);
10157                 pci_restore_state(pdev);
10158                 pci_save_state(pdev);
10159                 pci_wake_from_d3(pdev, false);
10160
10161                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10162                 if (reg == 0)
10163                         result = PCI_ERS_RESULT_RECOVERED;
10164                 else
10165                         result = PCI_ERS_RESULT_DISCONNECT;
10166         }
10167
10168         err = pci_cleanup_aer_uncorrect_error_status(pdev);
10169         if (err) {
10170                 dev_info(&pdev->dev,
10171                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10172                          err);
10173                 /* non-fatal, continue */
10174         }
10175
10176         return result;
10177 }
10178
10179 /**
10180  * i40e_pci_error_resume - restart operations after PCI error recovery
10181  * @pdev: PCI device information struct
10182  *
10183  * Called to allow the driver to bring things back up after PCI error
10184  * and/or reset recovery has finished.
10185  **/
10186 static void i40e_pci_error_resume(struct pci_dev *pdev)
10187 {
10188         struct i40e_pf *pf = pci_get_drvdata(pdev);
10189
10190         dev_info(&pdev->dev, "%s\n", __func__);
10191         if (test_bit(__I40E_SUSPENDED, &pf->state))
10192                 return;
10193
10194         rtnl_lock();
10195         i40e_handle_reset_warning(pf);
10196         rtnl_lock();
10197 }
10198
10199 /**
10200  * i40e_shutdown - PCI callback for shutting down
10201  * @pdev: PCI device information struct
10202  **/
10203 static void i40e_shutdown(struct pci_dev *pdev)
10204 {
10205         struct i40e_pf *pf = pci_get_drvdata(pdev);
10206         struct i40e_hw *hw = &pf->hw;
10207
10208         set_bit(__I40E_SUSPENDED, &pf->state);
10209         set_bit(__I40E_DOWN, &pf->state);
10210         rtnl_lock();
10211         i40e_prep_for_reset(pf);
10212         rtnl_unlock();
10213
10214         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10215         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10216
10217         i40e_clear_interrupt_scheme(pf);
10218
10219         if (system_state == SYSTEM_POWER_OFF) {
10220                 pci_wake_from_d3(pdev, pf->wol_en);
10221                 pci_set_power_state(pdev, PCI_D3hot);
10222         }
10223 }
10224
10225 #ifdef CONFIG_PM
10226 /**
10227  * i40e_suspend - PCI callback for moving to D3
10228  * @pdev: PCI device information struct
10229  **/
10230 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10231 {
10232         struct i40e_pf *pf = pci_get_drvdata(pdev);
10233         struct i40e_hw *hw = &pf->hw;
10234
10235         set_bit(__I40E_SUSPENDED, &pf->state);
10236         set_bit(__I40E_DOWN, &pf->state);
10237         del_timer_sync(&pf->service_timer);
10238         cancel_work_sync(&pf->service_task);
10239         i40e_fdir_teardown(pf);
10240
10241         rtnl_lock();
10242         i40e_prep_for_reset(pf);
10243         rtnl_unlock();
10244
10245         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10246         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10247
10248         pci_wake_from_d3(pdev, pf->wol_en);
10249         pci_set_power_state(pdev, PCI_D3hot);
10250
10251         return 0;
10252 }
10253
10254 /**
10255  * i40e_resume - PCI callback for waking up from D3
10256  * @pdev: PCI device information struct
10257  **/
10258 static int i40e_resume(struct pci_dev *pdev)
10259 {
10260         struct i40e_pf *pf = pci_get_drvdata(pdev);
10261         u32 err;
10262
10263         pci_set_power_state(pdev, PCI_D0);
10264         pci_restore_state(pdev);
10265         /* pci_restore_state() clears dev->state_saves, so
10266          * call pci_save_state() again to restore it.
10267          */
10268         pci_save_state(pdev);
10269
10270         err = pci_enable_device_mem(pdev);
10271         if (err) {
10272                 dev_err(&pdev->dev,
10273                         "%s: Cannot enable PCI device from suspend\n",
10274                         __func__);
10275                 return err;
10276         }
10277         pci_set_master(pdev);
10278
10279         /* no wakeup events while running */
10280         pci_wake_from_d3(pdev, false);
10281
10282         /* handling the reset will rebuild the device state */
10283         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10284                 clear_bit(__I40E_DOWN, &pf->state);
10285                 rtnl_lock();
10286                 i40e_reset_and_rebuild(pf, false);
10287                 rtnl_unlock();
10288         }
10289
10290         return 0;
10291 }
10292
10293 #endif
/* PCI Advanced Error Reporting (AER) recovery callbacks: quiesce on
 * error, validate after slot reset, then rebuild on resume
 */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
10299
/* PCI driver registration: probe/remove lifecycle, legacy PM hooks,
 * shutdown/WoL handling, AER recovery, and SR-IOV VF configuration
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
10313
10314 /**
10315  * i40e_init_module - Driver registration routine
10316  *
10317  * i40e_init_module is the first routine called when the driver is
10318  * loaded. All it does is register with the PCI subsystem.
10319  **/
10320 static int __init i40e_init_module(void)
10321 {
10322         pr_info("%s: %s - version %s\n", i40e_driver_name,
10323                 i40e_driver_string, i40e_driver_version_str);
10324         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10325
10326         i40e_dbg_init();
10327         return pci_register_driver(&i40e_driver);
10328 }
10329 module_init(i40e_init_module);
10330
10331 /**
10332  * i40e_exit_module - Driver exit cleanup routine
10333  *
10334  * i40e_exit_module is called just before the driver is removed
10335  * from memory.
10336  **/
10337 static void __exit i40e_exit_module(void)
10338 {
10339         pci_unregister_driver(&i40e_driver);
10340         i40e_dbg_exit();
10341 }
10342 module_exit(i40e_exit_module);