]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/intel/i40e/i40e_main.c
291480650d113bf2e8a42fde45c10e04fb26b21e
[karo-tx-linux.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30
31 #ifdef CONFIG_SPARC
32 #include <asm/idprom.h>
33 #include <asm/prom.h>
34 #endif
35
36 /* Local includes */
37 #include "i40e.h"
38 #include "i40e_diag.h"
39 #if IS_ENABLED(CONFIG_VXLAN)
40 #include <net/vxlan.h>
41 #endif
42 #if IS_ENABLED(CONFIG_GENEVE)
43 #include <net/geneve.h>
44 #endif
45
/* canonical driver name, referenced by PCI registration and log output */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
                        "Intel(R) Ethernet Connection XL710 Network Driver";

/* suffix appended to the version string for this build */
#define DRV_KERN "-k"

/* driver version, stringified into DRV_VERSION below */
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 8
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
                              u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
74
75 /* i40e_pci_tbl - PCI Device ID Table
76  *
77  * Last entry must be all 0s
78  *
79  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
80  *   Class, Class Mask, private data (not used) }
81  */
82 static const struct pci_device_id i40e_pci_tbl[] = {
83         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
84         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
85         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
86         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
87         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
88         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
89         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
90         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
91         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
92         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
93         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
94         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
95         {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
96         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
97         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
98         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
99         /* required last entry */
100         {0, }
101 };
102 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
103
/* upper bound on VFs a single PF can expose */
#define I40E_MAX_VF_COUNT 128
/* -1 selects the driver's default debug mask */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
113
114 /**
115  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
116  * @hw:   pointer to the HW structure
117  * @mem:  ptr to mem struct to fill out
118  * @size: size of memory requested
119  * @alignment: what to align the allocation to
120  **/
121 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
122                             u64 size, u32 alignment)
123 {
124         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
125
126         mem->size = ALIGN(size, alignment);
127         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
128                                       &mem->pa, GFP_KERNEL);
129         if (!mem->va)
130                 return -ENOMEM;
131
132         return 0;
133 }
134
135 /**
136  * i40e_free_dma_mem_d - OS specific memory free for shared code
137  * @hw:   pointer to the HW structure
138  * @mem:  ptr to mem struct to free
139  **/
140 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
141 {
142         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
143
144         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
145         mem->va = NULL;
146         mem->pa = 0;
147         mem->size = 0;
148
149         return 0;
150 }
151
152 /**
153  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
154  * @hw:   pointer to the HW structure
155  * @mem:  ptr to mem struct to fill out
156  * @size: size of memory requested
157  **/
158 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
159                              u32 size)
160 {
161         mem->size = size;
162         mem->va = kzalloc(size, GFP_KERNEL);
163
164         if (!mem->va)
165                 return -ENOMEM;
166
167         return 0;
168 }
169
170 /**
171  * i40e_free_virt_mem_d - OS specific memory free for shared code
172  * @hw:   pointer to the HW structure
173  * @mem:  ptr to mem struct to free
174  **/
175 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
176 {
177         /* it's ok to kfree a NULL pointer */
178         kfree(mem->va);
179         mem->va = NULL;
180         mem->size = 0;
181
182         return 0;
183 }
184
185 /**
186  * i40e_get_lump - find a lump of free generic resource
187  * @pf: board private structure
188  * @pile: the pile of resource to search
189  * @needed: the number of items needed
190  * @id: an owner id to stick on the items assigned
191  *
192  * Returns the base item index of the lump, or negative for error
193  *
194  * The search_hint trick and lack of advanced fit-finding only work
195  * because we're highly likely to have all the same size lump requests.
196  * Linear search time and any fragmentation should be minimal.
197  **/
198 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
199                          u16 needed, u16 id)
200 {
201         int ret = -ENOMEM;
202         int i, j;
203
204         if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
205                 dev_info(&pf->pdev->dev,
206                          "param err: pile=%p needed=%d id=0x%04x\n",
207                          pile, needed, id);
208                 return -EINVAL;
209         }
210
211         /* start the linear search with an imperfect hint */
212         i = pile->search_hint;
213         while (i < pile->num_entries) {
214                 /* skip already allocated entries */
215                 if (pile->list[i] & I40E_PILE_VALID_BIT) {
216                         i++;
217                         continue;
218                 }
219
220                 /* do we have enough in this lump? */
221                 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
222                         if (pile->list[i+j] & I40E_PILE_VALID_BIT)
223                                 break;
224                 }
225
226                 if (j == needed) {
227                         /* there was enough, so assign it to the requestor */
228                         for (j = 0; j < needed; j++)
229                                 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
230                         ret = i;
231                         pile->search_hint = i + j;
232                         break;
233                 }
234
235                 /* not enough, so skip over it and continue looking */
236                 i += j;
237         }
238
239         return ret;
240 }
241
242 /**
243  * i40e_put_lump - return a lump of generic resource
244  * @pile: the pile of resource to search
245  * @index: the base item index
246  * @id: the owner id of the items assigned
247  *
248  * Returns the count of items in the lump
249  **/
250 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
251 {
252         int valid_id = (id | I40E_PILE_VALID_BIT);
253         int count = 0;
254         int i;
255
256         if (!pile || index >= pile->num_entries)
257                 return -EINVAL;
258
259         for (i = index;
260              i < pile->num_entries && pile->list[i] == valid_id;
261              i++) {
262                 pile->list[i] = 0;
263                 count++;
264         }
265
266         if (count && index < pile->search_hint)
267                 pile->search_hint = index;
268
269         return count;
270 }
271
272 /**
273  * i40e_find_vsi_from_id - searches for the vsi with the given id
274  * @pf - the pf structure to search for the vsi
275  * @id - id of the vsi it is searching for
276  **/
277 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
278 {
279         int i;
280
281         for (i = 0; i < pf->num_alloc_vsi; i++)
282                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
283                         return pf->vsi[i];
284
285         return NULL;
286 }
287
288 /**
289  * i40e_service_event_schedule - Schedule the service task to wake up
290  * @pf: board private structure
291  *
292  * If not already scheduled, this puts the task into the work queue
293  **/
294 static void i40e_service_event_schedule(struct i40e_pf *pf)
295 {
296         if (!test_bit(__I40E_DOWN, &pf->state) &&
297             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
298             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
299                 schedule_work(&pf->service_task);
300 }
301
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 *
 * Locates the hung queue, dumps diagnostic ring state, and requests a
 * progressively larger reset (PF -> CORE -> GLOBAL) on each repeated
 * timeout, backing off to level 1 after 20s of quiet.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		/* fall back to the device-wide timestamp if the queue
		 * has never transmitted
		 */
		trans_start = q->trans_start ? : netdev->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* after 20s of quiet, restart escalation from the mildest reset;
	 * within one watchdog period, suppress repeated actions
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* escalate: each level requests a wider-scope reset */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	/* service task performs the actual reset */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
399
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 *
 * Publishes freshly-filled rx descriptors to the hardware by advancing
 * the ring tail register.
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	/* the tail write must stay after the barrier above */
	writel(val, rx_ring->tail);
}
417
418 /**
419  * i40e_get_vsi_stats_struct - Get System Network Statistics
420  * @vsi: the VSI we care about
421  *
422  * Returns the address of the device statistics structure.
423  * The statistics are actually updated from the service task.
424  **/
425 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
426 {
427         return &vsi->net_stats;
428 }
429
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided structure to accumulate per-queue totals into
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* nothing useful to report while down or before rings exist */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		/* ring pointer may be cleared concurrently; snapshot it */
		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* seqcount retry loop gives a consistent 64-bit snapshot
		 * on 32-bit architectures
		 */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* NOTE(review): indexing assumes the rx ring is allocated
		 * immediately after its tx ring in the same block --
		 * confirm against the ring allocation code
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
500
501 /**
502  * i40e_vsi_reset_stats - Resets all stats of the given vsi
503  * @vsi: the VSI to have its stats reset
504  **/
505 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
506 {
507         struct rtnl_link_stats64 *ns;
508         int i;
509
510         if (!vsi)
511                 return;
512
513         ns = i40e_get_vsi_stats_struct(vsi);
514         memset(ns, 0, sizeof(*ns));
515         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
516         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
517         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
518         if (vsi->rx_rings && vsi->rx_rings[0]) {
519                 for (i = 0; i < vsi->num_queue_pairs; i++) {
520                         memset(&vsi->rx_rings[i]->stats, 0,
521                                sizeof(vsi->rx_rings[i]->stats));
522                         memset(&vsi->rx_rings[i]->rx_stats, 0,
523                                sizeof(vsi->rx_rings[i]->rx_stats));
524                         memset(&vsi->tx_rings[i]->stats, 0,
525                                sizeof(vsi->tx_rings[i]->stats));
526                         memset(&vsi->tx_rings[i]->tx_stats, 0,
527                                sizeof(vsi->tx_rings[i]->tx_stats));
528                 }
529         }
530         vsi->stat_offsets_loaded = false;
531 }
532
533 /**
534  * i40e_pf_reset_stats - Reset all of the stats for the given PF
535  * @pf: the PF to be reset
536  **/
537 void i40e_pf_reset_stats(struct i40e_pf *pf)
538 {
539         int i;
540
541         memset(&pf->stats, 0, sizeof(pf->stats));
542         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
543         pf->stat_offsets_loaded = false;
544
545         for (i = 0; i < I40E_MAX_VEB; i++) {
546                 if (pf->veb[i]) {
547                         memset(&pf->veb[i]->stats, 0,
548                                sizeof(pf->veb[i]->stats));
549                         memset(&pf->veb[i]->stats_offsets, 0,
550                                sizeof(pf->veb[i]->stats_offsets));
551                         pf->veb[i]->stat_offsets_loaded = false;
552                 }
553         }
554 }
555
556 /**
557  * i40e_stat_update48 - read and update a 48 bit stat from the chip
558  * @hw: ptr to the hardware info
559  * @hireg: the high 32 bit reg to read
560  * @loreg: the low 32 bit reg to read
561  * @offset_loaded: has the initial offset been loaded yet
562  * @offset: ptr to current offset value
563  * @stat: ptr to the stat
564  *
565  * Since the device stats are not reset at PFReset, they likely will not
566  * be zeroed when the driver starts.  We'll save the first values read
567  * and use them as offsets to be subtracted from the raw values in order
568  * to report stats that count from zero.  In the process, we also manage
569  * the potential roll-over.
570  **/
571 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
572                                bool offset_loaded, u64 *offset, u64 *stat)
573 {
574         u64 new_data;
575
576         if (hw->device_id == I40E_DEV_ID_QEMU) {
577                 new_data = rd32(hw, loreg);
578                 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
579         } else {
580                 new_data = rd64(hw, loreg);
581         }
582         if (!offset_loaded)
583                 *offset = new_data;
584         if (likely(new_data >= *offset))
585                 *stat = new_data - *offset;
586         else
587                 *stat = (new_data + BIT_ULL(48)) - *offset;
588         *stat &= 0xFFFFFFFFFFFFULL;
589 }
590
591 /**
592  * i40e_stat_update32 - read and update a 32 bit stat from the chip
593  * @hw: ptr to the hardware info
594  * @reg: the hw reg to read
595  * @offset_loaded: has the initial offset been loaded yet
596  * @offset: ptr to current offset value
597  * @stat: ptr to the stat
598  **/
599 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
600                                bool offset_loaded, u64 *offset, u64 *stat)
601 {
602         u32 new_data;
603
604         new_data = rd32(hw, reg);
605         if (!offset_loaded)
606                 *offset = new_data;
607         if (likely(new_data >= *offset))
608                 *stat = (u32)(new_data - *offset);
609         else
610                 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
611 }
612
613 /**
614  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
615  * @vsi: the VSI to be updated
616  **/
617 void i40e_update_eth_stats(struct i40e_vsi *vsi)
618 {
619         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
620         struct i40e_pf *pf = vsi->back;
621         struct i40e_hw *hw = &pf->hw;
622         struct i40e_eth_stats *oes;
623         struct i40e_eth_stats *es;     /* device's eth stats */
624
625         es = &vsi->eth_stats;
626         oes = &vsi->eth_stats_offsets;
627
628         /* Gather up the stats that the hw collects */
629         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
630                            vsi->stat_offsets_loaded,
631                            &oes->tx_errors, &es->tx_errors);
632         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
633                            vsi->stat_offsets_loaded,
634                            &oes->rx_discards, &es->rx_discards);
635         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
636                            vsi->stat_offsets_loaded,
637                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
638         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
639                            vsi->stat_offsets_loaded,
640                            &oes->tx_errors, &es->tx_errors);
641
642         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
643                            I40E_GLV_GORCL(stat_idx),
644                            vsi->stat_offsets_loaded,
645                            &oes->rx_bytes, &es->rx_bytes);
646         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
647                            I40E_GLV_UPRCL(stat_idx),
648                            vsi->stat_offsets_loaded,
649                            &oes->rx_unicast, &es->rx_unicast);
650         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
651                            I40E_GLV_MPRCL(stat_idx),
652                            vsi->stat_offsets_loaded,
653                            &oes->rx_multicast, &es->rx_multicast);
654         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
655                            I40E_GLV_BPRCL(stat_idx),
656                            vsi->stat_offsets_loaded,
657                            &oes->rx_broadcast, &es->rx_broadcast);
658
659         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
660                            I40E_GLV_GOTCL(stat_idx),
661                            vsi->stat_offsets_loaded,
662                            &oes->tx_bytes, &es->tx_bytes);
663         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
664                            I40E_GLV_UPTCL(stat_idx),
665                            vsi->stat_offsets_loaded,
666                            &oes->tx_unicast, &es->tx_unicast);
667         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
668                            I40E_GLV_MPTCL(stat_idx),
669                            vsi->stat_offsets_loaded,
670                            &oes->tx_multicast, &es->tx_multicast);
671         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
672                            I40E_GLV_BPTCL(stat_idx),
673                            vsi->stat_offsets_loaded,
674                            &oes->tx_broadcast, &es->tx_broadcast);
675         vsi->stat_offsets_loaded = true;
676 }
677
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Reads the VEB's switch-level and per-traffic-class hardware counters
 * and folds them into the VEB's software stats using the saved offsets.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP register is only valid on B0 and later silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per-traffic-class packet and byte counters, rx and tx */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
756
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * Folds the global FCoE hardware counters into the VSI's FCoE stats
 * using the saved offsets.  No-op for non-FCoE VSIs.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* map this PF's seid into the global FCoE stat register index */
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;	/* netdev stats offset snapshot */
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;	/* eth stats offset snapshot */
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;	/* u64_stats sequence cookie */
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* skip while the VSI is down or a reconfiguration is in flight */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* retry the snapshot if a writer updated the counters
		 * while we were reading them
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();

	/* publish the per-queue aggregates on the VSI */
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
911
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 *
 * Walks the port-level (GLPRT_*) and filter-director statistics
 * registers, folding each into pf->stats via the delta-style
 * i40e_stat_update helpers with pf->stats_offsets as the baseline.
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;	/* baseline */
	struct i40e_hw_port_stats *nsd = &pf->stats;		/* running */
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* port-level byte/packet counters (rx/tx, ucast/mcast/bcast) */
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	/* receive error counters */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	/* MAC fault counters */
	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* link-level (802.3x) flow control frame counters */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* per-priority flow control counters, one set per priority 0-7 */
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	/* receive frame-size histogram buckets */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* transmit frame-size histogram buckets */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* malformed-frame counters */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	/* Energy Efficient Ethernet: LPI status bits and event counts */
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* flow director features count as enabled only while not
	 * auto-disabled
	 */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	/* from now on the stat_update helpers compute deltas from osd */
	pf->stat_offsets_loaded = true;
}
1144
1145 /**
1146  * i40e_update_stats - Update the various statistics counters.
1147  * @vsi: the VSI to be updated
1148  *
1149  * Update the various stats for this VSI and its related entities.
1150  **/
1151 void i40e_update_stats(struct i40e_vsi *vsi)
1152 {
1153         struct i40e_pf *pf = vsi->back;
1154
1155         if (vsi == pf->vsi[pf->lan_vsi])
1156                 i40e_update_pf_stats(pf);
1157
1158         i40e_update_vsi_stats(vsi);
1159 #ifdef I40E_FCOE
1160         i40e_update_fcoe_stats(vsi);
1161 #endif
1162 }
1163
1164 /**
1165  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1166  * @vsi: the VSI to be searched
1167  * @macaddr: the MAC address
1168  * @vlan: the vlan
1169  * @is_vf: make sure its a VF filter, else doesn't matter
1170  * @is_netdev: make sure its a netdev filter, else doesn't matter
1171  *
1172  * Returns ptr to the filter object or NULL
1173  **/
1174 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1175                                                 u8 *macaddr, s16 vlan,
1176                                                 bool is_vf, bool is_netdev)
1177 {
1178         struct i40e_mac_filter *f;
1179
1180         if (!vsi || !macaddr)
1181                 return NULL;
1182
1183         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1184                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1185                     (vlan == f->vlan)    &&
1186                     (!is_vf || f->is_vf) &&
1187                     (!is_netdev || f->is_netdev))
1188                         return f;
1189         }
1190         return NULL;
1191 }
1192
1193 /**
1194  * i40e_find_mac - Find a mac addr in the macvlan filters list
1195  * @vsi: the VSI to be searched
1196  * @macaddr: the MAC address we are searching for
1197  * @is_vf: make sure its a VF filter, else doesn't matter
1198  * @is_netdev: make sure its a netdev filter, else doesn't matter
1199  *
1200  * Returns the first filter with the provided MAC address or NULL if
1201  * MAC address was not found
1202  **/
1203 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1204                                       bool is_vf, bool is_netdev)
1205 {
1206         struct i40e_mac_filter *f;
1207
1208         if (!vsi || !macaddr)
1209                 return NULL;
1210
1211         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1212                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1213                     (!is_vf || f->is_vf) &&
1214                     (!is_netdev || f->is_netdev))
1215                         return f;
1216         }
1217         return NULL;
1218 }
1219
1220 /**
1221  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1222  * @vsi: the VSI to be searched
1223  *
1224  * Returns true if VSI is in vlan mode or false otherwise
1225  **/
1226 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1227 {
1228         struct i40e_mac_filter *f;
1229
1230         /* Only -1 for all the filters denotes not in vlan mode
1231          * so we have to go through all the list in order to make sure
1232          */
1233         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1234                 if (f->vlan >= 0 || vsi->info.pvid)
1235                         return true;
1236         }
1237
1238         return false;
1239 }
1240
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		/* a configured port VLAN overrides the filter's own VLAN;
		 * note this rewrites the existing filter entry in place
		 */
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		/* add @macaddr on this filter's VLAN if not already present.
		 * i40e_add_filter() inserts at the list head (list_add), so
		 * entries created here are not revisited by this iteration.
		 */
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	/* newest filter sits at the head; NULL if the list is empty */
	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
1272
1273 /**
1274  * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1275  * @vsi: the VSI to be searched
1276  * @macaddr: the mac address to be removed
1277  * @is_vf: true if it is a VF
1278  * @is_netdev: true if it is a netdev
1279  *
1280  * Removes a given MAC address from a VSI, regardless of VLAN
1281  *
1282  * Returns 0 for success, or error
1283  **/
1284 int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1285                           bool is_vf, bool is_netdev)
1286 {
1287         struct i40e_mac_filter *f = NULL;
1288         int changed = 0;
1289
1290         WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1291              "Missing mac_filter_list_lock\n");
1292         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1293                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1294                     (is_vf == f->is_vf) &&
1295                     (is_netdev == f->is_netdev)) {
1296                         f->counter--;
1297                         f->changed = true;
1298                         changed = 1;
1299                 }
1300         }
1301         if (changed) {
1302                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1303                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1304                 return 0;
1305         }
1306         return -ENOENT;
1307 }
1308
1309 /**
1310  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1311  * @vsi: the PF Main VSI - inappropriate for any other VSI
1312  * @macaddr: the MAC address
1313  *
1314  * Some older firmware configurations set up a default promiscuous VLAN
1315  * filter that needs to be removed.
1316  **/
1317 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1318 {
1319         struct i40e_aqc_remove_macvlan_element_data element;
1320         struct i40e_pf *pf = vsi->back;
1321         i40e_status ret;
1322
1323         /* Only appropriate for the PF main VSI */
1324         if (vsi->type != I40E_VSI_MAIN)
1325                 return -EINVAL;
1326
1327         memset(&element, 0, sizeof(element));
1328         ether_addr_copy(element.mac_addr, macaddr);
1329         element.vlan_tag = 0;
1330         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1331                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1332         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1333         if (ret)
1334                 return -ENOENT;
1335
1336         return 0;
1337 }
1338
1339 /**
1340  * i40e_add_filter - Add a mac/vlan filter to the VSI
1341  * @vsi: the VSI to be searched
1342  * @macaddr: the MAC address
1343  * @vlan: the vlan
1344  * @is_vf: make sure its a VF filter, else doesn't matter
1345  * @is_netdev: make sure its a netdev filter, else doesn't matter
1346  *
1347  * Returns ptr to the filter object or NULL when no memory available.
1348  *
1349  * NOTE: This function is expected to be called with mac_filter_list_lock
1350  * being held.
1351  **/
1352 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1353                                         u8 *macaddr, s16 vlan,
1354                                         bool is_vf, bool is_netdev)
1355 {
1356         struct i40e_mac_filter *f;
1357
1358         if (!vsi || !macaddr)
1359                 return NULL;
1360
1361         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1362         if (!f) {
1363                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1364                 if (!f)
1365                         goto add_filter_out;
1366
1367                 ether_addr_copy(f->macaddr, macaddr);
1368                 f->vlan = vlan;
1369                 f->changed = true;
1370
1371                 INIT_LIST_HEAD(&f->list);
1372                 list_add(&f->list, &vsi->mac_filter_list);
1373         }
1374
1375         /* increment counter and add a new flag if needed */
1376         if (is_vf) {
1377                 if (!f->is_vf) {
1378                         f->is_vf = true;
1379                         f->counter++;
1380                 }
1381         } else if (is_netdev) {
1382                 if (!f->is_netdev) {
1383                         f->is_netdev = true;
1384                         f->counter++;
1385                 }
1386         } else {
1387                 f->counter++;
1388         }
1389
1390         /* changed tells sync_filters_subtask to
1391          * push the filter down to the firmware
1392          */
1393         if (f->changed) {
1394                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1395                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1396         }
1397
1398 add_filter_out:
1399         return f;
1400 }
1401
1402 /**
1403  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1404  * @vsi: the VSI to be searched
1405  * @macaddr: the MAC address
1406  * @vlan: the vlan
1407  * @is_vf: make sure it's a VF filter, else doesn't matter
1408  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1409  *
1410  * NOTE: This function is expected to be called with mac_filter_list_lock
1411  * being held.
1412  **/
1413 void i40e_del_filter(struct i40e_vsi *vsi,
1414                      u8 *macaddr, s16 vlan,
1415                      bool is_vf, bool is_netdev)
1416 {
1417         struct i40e_mac_filter *f;
1418
1419         if (!vsi || !macaddr)
1420                 return;
1421
1422         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1423         if (!f || f->counter == 0)
1424                 return;
1425
1426         if (is_vf) {
1427                 if (f->is_vf) {
1428                         f->is_vf = false;
1429                         f->counter--;
1430                 }
1431         } else if (is_netdev) {
1432                 if (f->is_netdev) {
1433                         f->is_netdev = false;
1434                         f->counter--;
1435                 }
1436         } else {
1437                 /* make sure we don't remove a filter in use by VF or netdev */
1438                 int min_f = 0;
1439
1440                 min_f += (f->is_vf ? 1 : 0);
1441                 min_f += (f->is_netdev ? 1 : 0);
1442
1443                 if (f->counter > min_f)
1444                         f->counter--;
1445         }
1446
1447         /* counter == 0 tells sync_filters_subtask to
1448          * remove the filter from the firmware's list
1449          */
1450         if (f->counter == 0) {
1451                 f->changed = true;
1452                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1453                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1454         }
1455 }
1456
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure (struct sockaddr)
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* nothing to do when the requested address is already in use */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	/* don't touch filters while the device is down or resetting */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* for the main VSI, write the new locally administered address
	 * into firmware first (LAA_WOL write type — presumably so it is
	 * retained for wake-on-LAN; confirm against AQ documentation)
	 */
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	/* remove the old address: the default hw MAC is removed directly
	 * via an AQ command, anything else comes off the VSI filter list
	 */
	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* add the new address, mirroring the removal above; a non-hw
	 * address is tracked as a locally administered address filter
	 */
	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* push the accumulated filter changes out to the hardware */
	return i40e_sync_vsi_filters(vsi);
}
1544
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;	/* valid-sections bits for the VSI context */
	u8 netdev_tc = 0;	/* next netdev traffic class index to hand out */
	u16 numtc = 0;		/* number of enabled traffic classes */
	u16 qcount;
	u8 offset;		/* running queue offset across TCs */
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;	/* queue pairs available per enabled TC */

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			/* queue count per TC depends on the VSI type */
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				/* these VSI types only expect TC0 */
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			/* pack offset and log2(queue count) into the TC's
			 * queue-map word for the VSI context
			 */
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SR-IOV VSIs get an explicit, possibly non-contiguous,
		 * per-queue mapping
		 */
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		/* everything else uses one contiguous range of queues */
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1692
1693 /**
1694  * i40e_set_rx_mode - NDO callback to set the netdev filters
1695  * @netdev: network interface device structure
1696  **/
1697 #ifdef I40E_FCOE
1698 void i40e_set_rx_mode(struct net_device *netdev)
1699 #else
1700 static void i40e_set_rx_mode(struct net_device *netdev)
1701 #endif
1702 {
1703         struct i40e_netdev_priv *np = netdev_priv(netdev);
1704         struct i40e_mac_filter *f, *ftmp;
1705         struct i40e_vsi *vsi = np->vsi;
1706         struct netdev_hw_addr *uca;
1707         struct netdev_hw_addr *mca;
1708         struct netdev_hw_addr *ha;
1709
1710         spin_lock_bh(&vsi->mac_filter_list_lock);
1711
1712         /* add addr if not already in the filter list */
1713         netdev_for_each_uc_addr(uca, netdev) {
1714                 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1715                         if (i40e_is_vsi_in_vlan(vsi))
1716                                 i40e_put_mac_in_vlan(vsi, uca->addr,
1717                                                      false, true);
1718                         else
1719                                 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1720                                                 false, true);
1721                 }
1722         }
1723
1724         netdev_for_each_mc_addr(mca, netdev) {
1725                 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1726                         if (i40e_is_vsi_in_vlan(vsi))
1727                                 i40e_put_mac_in_vlan(vsi, mca->addr,
1728                                                      false, true);
1729                         else
1730                                 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1731                                                 false, true);
1732                 }
1733         }
1734
1735         /* remove filter if not in netdev list */
1736         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1737
1738                 if (!f->is_netdev)
1739                         continue;
1740
1741                 netdev_for_each_mc_addr(mca, netdev)
1742                         if (ether_addr_equal(mca->addr, f->macaddr))
1743                                 goto bottom_of_search_loop;
1744
1745                 netdev_for_each_uc_addr(uca, netdev)
1746                         if (ether_addr_equal(uca->addr, f->macaddr))
1747                                 goto bottom_of_search_loop;
1748
1749                 for_each_dev_addr(netdev, ha)
1750                         if (ether_addr_equal(ha->addr, f->macaddr))
1751                                 goto bottom_of_search_loop;
1752
1753                 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1754                 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1755
1756 bottom_of_search_loop:
1757                 continue;
1758         }
1759         spin_unlock_bh(&vsi->mac_filter_list_lock);
1760
1761         /* check for other flag changes */
1762         if (vsi->current_netdev_flags != vsi->netdev->flags) {
1763                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1764                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1765         }
1766 }
1767
1768 /**
1769  * i40e_mac_filter_entry_clone - Clones a MAC filter entry
1770  * @src: source MAC filter entry to be clones
1771  *
1772  * Returns the pointer to newly cloned MAC filter entry or NULL
1773  * in case of error
1774  **/
1775 static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
1776                                         struct i40e_mac_filter *src)
1777 {
1778         struct i40e_mac_filter *f;
1779
1780         f = kzalloc(sizeof(*f), GFP_ATOMIC);
1781         if (!f)
1782                 return NULL;
1783         *f = *src;
1784
1785         INIT_LIST_HEAD(&f->list);
1786
1787         return f;
1788 }
1789
1790 /**
1791  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1792  * @vsi: pointer to vsi struct
1793  * @from: Pointer to list which contains MAC filter entries - changes to
1794  *        those entries needs to be undone.
1795  *
1796  * MAC filter entries from list were slated to be removed from device.
1797  **/
1798 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1799                                          struct list_head *from)
1800 {
1801         struct i40e_mac_filter *f, *ftmp;
1802
1803         list_for_each_entry_safe(f, ftmp, from, list) {
1804                 f->changed = true;
1805                 /* Move the element back into MAC filter list*/
1806                 list_move_tail(&f->list, &vsi->mac_filter_list);
1807         }
1808 }
1809
1810 /**
1811  * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1812  * @vsi: pointer to vsi struct
1813  *
1814  * MAC filter entries from list were slated to be added from device.
1815  **/
1816 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
1817 {
1818         struct i40e_mac_filter *f, *ftmp;
1819
1820         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1821                 if (!f->changed && f->counter)
1822                         f->changed = true;
1823         }
1824 }
1825
1826 /**
1827  * i40e_cleanup_add_list - Deletes the element from add list and release
1828  *                      memory
1829  * @add_list: Pointer to list which contains MAC filter entries
1830  **/
1831 static void i40e_cleanup_add_list(struct list_head *add_list)
1832 {
1833         struct i40e_mac_filter *f, *ftmp;
1834
1835         list_for_each_entry_safe(f, ftmp, add_list, list) {
1836                 list_del(&f->list);
1837                 kfree(f);
1838         }
1839 }
1840
1841 /**
1842  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1843  * @vsi: ptr to the VSI
1844  *
1845  * Push any outstanding VSI filter changes through the AdminQ.
1846  *
1847  * Returns 0 or error value
1848  **/
1849 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1850 {
1851         struct list_head tmp_del_list, tmp_add_list;
1852         struct i40e_mac_filter *f, *ftmp, *fclone;
1853         bool promisc_forced_on = false;
1854         bool add_happened = false;
1855         int filter_list_len = 0;
1856         u32 changed_flags = 0;
1857         i40e_status aq_ret = 0;
1858         bool err_cond = false;
1859         int retval = 0;
1860         struct i40e_pf *pf;
1861         int num_add = 0;
1862         int num_del = 0;
1863         int aq_err = 0;
1864         u16 cmd_flags;
1865
1866         /* empty array typed pointers, kcalloc later */
1867         struct i40e_aqc_add_macvlan_element_data *add_list;
1868         struct i40e_aqc_remove_macvlan_element_data *del_list;
1869
1870         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1871                 usleep_range(1000, 2000);
1872         pf = vsi->back;
1873
1874         if (vsi->netdev) {
1875                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1876                 vsi->current_netdev_flags = vsi->netdev->flags;
1877         }
1878
1879         INIT_LIST_HEAD(&tmp_del_list);
1880         INIT_LIST_HEAD(&tmp_add_list);
1881
1882         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1883                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1884
1885                 spin_lock_bh(&vsi->mac_filter_list_lock);
1886                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1887                         if (!f->changed)
1888                                 continue;
1889
1890                         if (f->counter != 0)
1891                                 continue;
1892                         f->changed = false;
1893
1894                         /* Move the element into temporary del_list */
1895                         list_move_tail(&f->list, &tmp_del_list);
1896                 }
1897
1898                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1899                         if (!f->changed)
1900                                 continue;
1901
1902                         if (f->counter == 0)
1903                                 continue;
1904                         f->changed = false;
1905
1906                         /* Clone MAC filter entry and add into temporary list */
1907                         fclone = i40e_mac_filter_entry_clone(f);
1908                         if (!fclone) {
1909                                 err_cond = true;
1910                                 break;
1911                         }
1912                         list_add_tail(&fclone->list, &tmp_add_list);
1913                 }
1914
1915                 /* if failed to clone MAC filter entry - undo */
1916                 if (err_cond) {
1917                         i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1918                         i40e_undo_add_filter_entries(vsi);
1919                 }
1920                 spin_unlock_bh(&vsi->mac_filter_list_lock);
1921
1922                 if (err_cond) {
1923                         i40e_cleanup_add_list(&tmp_add_list);
1924                         retval = -ENOMEM;
1925                         goto out;
1926                 }
1927         }
1928
1929         /* Now process 'del_list' outside the lock */
1930         if (!list_empty(&tmp_del_list)) {
1931                 int del_list_size;
1932
1933                 filter_list_len = pf->hw.aq.asq_buf_size /
1934                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1935                 del_list_size = filter_list_len *
1936                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1937                 del_list = kzalloc(del_list_size, GFP_KERNEL);
1938                 if (!del_list) {
1939                         i40e_cleanup_add_list(&tmp_add_list);
1940
1941                         /* Undo VSI's MAC filter entry element updates */
1942                         spin_lock_bh(&vsi->mac_filter_list_lock);
1943                         i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1944                         i40e_undo_add_filter_entries(vsi);
1945                         spin_unlock_bh(&vsi->mac_filter_list_lock);
1946                         retval = -ENOMEM;
1947                         goto out;
1948                 }
1949
1950                 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
1951                         cmd_flags = 0;
1952
1953                         /* add to delete list */
1954                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1955                         del_list[num_del].vlan_tag =
1956                                 cpu_to_le16((u16)(f->vlan ==
1957                                             I40E_VLAN_ANY ? 0 : f->vlan));
1958
1959                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1960                         del_list[num_del].flags = cmd_flags;
1961                         num_del++;
1962
1963                         /* flush a full buffer */
1964                         if (num_del == filter_list_len) {
1965                                 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1966                                                                 vsi->seid,
1967                                                                 del_list,
1968                                                                 num_del,
1969                                                                 NULL);
1970                                 aq_err = pf->hw.aq.asq_last_status;
1971                                 num_del = 0;
1972                                 memset(del_list, 0, del_list_size);
1973
1974                                 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
1975                                         retval = -EIO;
1976                                         dev_err(&pf->pdev->dev,
1977                                                 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1978                                                 i40e_stat_str(&pf->hw, aq_ret),
1979                                                 i40e_aq_str(&pf->hw, aq_err));
1980                                 }
1981                         }
1982                         /* Release memory for MAC filter entries which were
1983                          * synced up with HW.
1984                          */
1985                         list_del(&f->list);
1986                         kfree(f);
1987                 }
1988
1989                 if (num_del) {
1990                         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1991                                                         del_list, num_del,
1992                                                         NULL);
1993                         aq_err = pf->hw.aq.asq_last_status;
1994                         num_del = 0;
1995
1996                         if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
1997                                 dev_info(&pf->pdev->dev,
1998                                          "ignoring delete macvlan error, err %s aq_err %s\n",
1999                                          i40e_stat_str(&pf->hw, aq_ret),
2000                                          i40e_aq_str(&pf->hw, aq_err));
2001                 }
2002
2003                 kfree(del_list);
2004                 del_list = NULL;
2005         }
2006
2007         if (!list_empty(&tmp_add_list)) {
2008                 int add_list_size;
2009
2010                 /* do all the adds now */
2011                 filter_list_len = pf->hw.aq.asq_buf_size /
2012                                sizeof(struct i40e_aqc_add_macvlan_element_data),
2013                 add_list_size = filter_list_len *
2014                                sizeof(struct i40e_aqc_add_macvlan_element_data);
2015                 add_list = kzalloc(add_list_size, GFP_KERNEL);
2016                 if (!add_list) {
2017                         /* Purge element from temporary lists */
2018                         i40e_cleanup_add_list(&tmp_add_list);
2019
2020                         /* Undo add filter entries from VSI MAC filter list */
2021                         spin_lock_bh(&vsi->mac_filter_list_lock);
2022                         i40e_undo_add_filter_entries(vsi);
2023                         spin_unlock_bh(&vsi->mac_filter_list_lock);
2024                         retval = -ENOMEM;
2025                         goto out;
2026                 }
2027
2028                 list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2029
2030                         add_happened = true;
2031                         cmd_flags = 0;
2032
2033                         /* add to add array */
2034                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
2035                         add_list[num_add].vlan_tag =
2036                                 cpu_to_le16(
2037                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
2038                         add_list[num_add].queue_number = 0;
2039
2040                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2041                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
2042                         num_add++;
2043
2044                         /* flush a full buffer */
2045                         if (num_add == filter_list_len) {
2046                                 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2047                                                              add_list, num_add,
2048                                                              NULL);
2049                                 aq_err = pf->hw.aq.asq_last_status;
2050                                 num_add = 0;
2051
2052                                 if (aq_ret)
2053                                         break;
2054                                 memset(add_list, 0, add_list_size);
2055                         }
2056                         /* Entries from tmp_add_list were cloned from MAC
2057                          * filter list, hence clean those cloned entries
2058                          */
2059                         list_del(&f->list);
2060                         kfree(f);
2061                 }
2062
2063                 if (num_add) {
2064                         aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2065                                                      add_list, num_add, NULL);
2066                         aq_err = pf->hw.aq.asq_last_status;
2067                         num_add = 0;
2068                 }
2069                 kfree(add_list);
2070                 add_list = NULL;
2071
2072                 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
2073                         retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
2074                         dev_info(&pf->pdev->dev,
2075                                  "add filter failed, err %s aq_err %s\n",
2076                                  i40e_stat_str(&pf->hw, aq_ret),
2077                                  i40e_aq_str(&pf->hw, aq_err));
2078                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
2079                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2080                                       &vsi->state)) {
2081                                 promisc_forced_on = true;
2082                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2083                                         &vsi->state);
2084                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
2085                         }
2086                 }
2087         }
2088
2089         /* check for changes in promiscuous modes */
2090         if (changed_flags & IFF_ALLMULTI) {
2091                 bool cur_multipromisc;
2092
2093                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2094                 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2095                                                                vsi->seid,
2096                                                                cur_multipromisc,
2097                                                                NULL);
2098                 if (aq_ret) {
2099                         retval = i40e_aq_rc_to_posix(aq_ret,
2100                                                      pf->hw.aq.asq_last_status);
2101                         dev_info(&pf->pdev->dev,
2102                                  "set multi promisc failed, err %s aq_err %s\n",
2103                                  i40e_stat_str(&pf->hw, aq_ret),
2104                                  i40e_aq_str(&pf->hw,
2105                                              pf->hw.aq.asq_last_status));
2106                 }
2107         }
2108         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
2109                 bool cur_promisc;
2110
2111                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2112                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2113                                         &vsi->state));
2114                 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
2115                         /* set defport ON for Main VSI instead of true promisc
2116                          * this way we will get all unicast/multicast and VLAN
2117                          * promisc behavior but will not get VF or VMDq traffic
2118                          * replicated on the Main VSI.
2119                          */
2120                         if (pf->cur_promisc != cur_promisc) {
2121                                 pf->cur_promisc = cur_promisc;
2122                                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2123                         }
2124                 } else {
2125                         aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2126                                                           &vsi->back->hw,
2127                                                           vsi->seid,
2128                                                           cur_promisc, NULL);
2129                         if (aq_ret) {
2130                                 retval =
2131                                 i40e_aq_rc_to_posix(aq_ret,
2132                                                     pf->hw.aq.asq_last_status);
2133                                 dev_info(&pf->pdev->dev,
2134                                          "set unicast promisc failed, err %d, aq_err %d\n",
2135                                          aq_ret, pf->hw.aq.asq_last_status);
2136                         }
2137                         aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2138                                                           &vsi->back->hw,
2139                                                           vsi->seid,
2140                                                           cur_promisc, NULL);
2141                         if (aq_ret) {
2142                                 retval =
2143                                 i40e_aq_rc_to_posix(aq_ret,
2144                                                     pf->hw.aq.asq_last_status);
2145                                 dev_info(&pf->pdev->dev,
2146                                          "set multicast promisc failed, err %d, aq_err %d\n",
2147                                          aq_ret, pf->hw.aq.asq_last_status);
2148                         }
2149                 }
2150                 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2151                                                    vsi->seid,
2152                                                    cur_promisc, NULL);
2153                 if (aq_ret) {
2154                         retval = i40e_aq_rc_to_posix(aq_ret,
2155                                                      pf->hw.aq.asq_last_status);
2156                         dev_info(&pf->pdev->dev,
2157                                  "set brdcast promisc failed, err %s, aq_err %s\n",
2158                                  i40e_stat_str(&pf->hw, aq_ret),
2159                                  i40e_aq_str(&pf->hw,
2160                                              pf->hw.aq.asq_last_status));
2161                 }
2162         }
2163 out:
2164         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2165         return retval;
2166 }
2167
2168 /**
2169  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2170  * @pf: board private structure
2171  **/
2172 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2173 {
2174         int v;
2175
2176         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2177                 return;
2178         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2179
2180         for (v = 0; v < pf->num_alloc_vsi; v++) {
2181                 if (pf->vsi[v] &&
2182                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2183                         int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2184
2185                         if (ret) {
2186                                 /* come back and try again later */
2187                                 pf->flags |= I40E_FLAG_FILTER_SYNC;
2188                                 break;
2189                         }
2190                 }
2191         }
2192 }
2193
2194 /**
2195  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2196  * @netdev: network interface device structure
2197  * @new_mtu: new value for maximum frame size
2198  *
2199  * Returns 0 on success, negative on failure
2200  **/
2201 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2202 {
2203         struct i40e_netdev_priv *np = netdev_priv(netdev);
2204         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2205         struct i40e_vsi *vsi = np->vsi;
2206
2207         /* MTU < 68 is an error and causes problems on some kernels */
2208         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2209                 return -EINVAL;
2210
2211         netdev_info(netdev, "changing MTU from %d to %d\n",
2212                     netdev->mtu, new_mtu);
2213         netdev->mtu = new_mtu;
2214         if (netif_running(netdev))
2215                 i40e_vsi_reinit_locked(vsi);
2216
2217         return 0;
2218 }
2219
2220 /**
2221  * i40e_ioctl - Access the hwtstamp interface
2222  * @netdev: network interface device structure
2223  * @ifr: interface request data
2224  * @cmd: ioctl command
2225  **/
2226 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2227 {
2228         struct i40e_netdev_priv *np = netdev_priv(netdev);
2229         struct i40e_pf *pf = np->vsi->back;
2230
2231         switch (cmd) {
2232         case SIOCGHWTSTAMP:
2233                 return i40e_ptp_get_ts_config(pf, ifr);
2234         case SIOCSHWTSTAMP:
2235                 return i40e_ptp_set_ts_config(pf, ifr);
2236         default:
2237                 return -EOPNOTSUPP;
2238         }
2239 }
2240
2241 /**
2242  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2243  * @vsi: the vsi being adjusted
2244  **/
2245 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2246 {
2247         struct i40e_vsi_context ctxt;
2248         i40e_status ret;
2249
2250         if ((vsi->info.valid_sections &
2251              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2252             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2253                 return;  /* already enabled */
2254
2255         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2256         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2257                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2258
2259         ctxt.seid = vsi->seid;
2260         ctxt.info = vsi->info;
2261         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2262         if (ret) {
2263                 dev_info(&vsi->back->pdev->dev,
2264                          "update vlan stripping failed, err %s aq_err %s\n",
2265                          i40e_stat_str(&vsi->back->hw, ret),
2266                          i40e_aq_str(&vsi->back->hw,
2267                                      vsi->back->hw.aq.asq_last_status));
2268         }
2269 }
2270
2271 /**
2272  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2273  * @vsi: the vsi being adjusted
2274  **/
2275 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2276 {
2277         struct i40e_vsi_context ctxt;
2278         i40e_status ret;
2279
2280         if ((vsi->info.valid_sections &
2281              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2282             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2283              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2284                 return;  /* already disabled */
2285
2286         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2287         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2288                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2289
2290         ctxt.seid = vsi->seid;
2291         ctxt.info = vsi->info;
2292         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2293         if (ret) {
2294                 dev_info(&vsi->back->pdev->dev,
2295                          "update vlan stripping failed, err %s aq_err %s\n",
2296                          i40e_stat_str(&vsi->back->hw, ret),
2297                          i40e_aq_str(&vsi->back->hw,
2298                                      vsi->back->hw.aq.asq_last_status));
2299         }
2300 }
2301
2302 /**
2303  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2304  * @netdev: network interface to be adjusted
2305  * @features: netdev features to test if VLAN offload is enabled or not
2306  **/
2307 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2308 {
2309         struct i40e_netdev_priv *np = netdev_priv(netdev);
2310         struct i40e_vsi *vsi = np->vsi;
2311
2312         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2313                 i40e_vlan_stripping_enable(vsi);
2314         else
2315                 i40e_vlan_stripping_disable(vsi);
2316 }
2317
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a filter for @vid on the netdev's own MAC address and on every
 * MAC address already present in the VSI filter list, then schedules
 * the service task so the changes get pushed to hardware.
 *
 * Return: 0 on success, -ENOMEM if any filter allocation fails.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* Locked once because all functions invoked below iterates list*/
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev) {
		/* cover the netdev's own unicast address first */
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* replicate the new vid onto every MAC already in the list */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			/* swap the netdev MAC's "any vlan" filter for an
			 * untagged-only (vid 0) filter
			 */
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		/* same ANY -> 0 swap for every other listed MAC */
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2408
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Removes the @vid filter from the netdev MAC and every listed MAC,
 * then, if no tagged filters remain, converts the vid 0 filters back
 * to I40E_VLAN_ANY and schedules the service task to apply the result.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterates list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		/* NOTE: a tagged filter on the netdev MAC is counted twice
		 * here, but only the zero/non-zero value of filter_count
		 * matters below, so the double count is harmless
		 */
		if (f->vlan)
			filter_count++;
	}

	if (!filter_count && is_netdev) {
		/* no tagged filters left: fall back to "any vlan" on the
		 * netdev MAC
		 */
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	if (!filter_count) {
		/* same 0 -> ANY swap for every other listed MAC */
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2487
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: vlan protocol (unused)
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 *
 * Return: 0 on success, -EINVAL for an out-of-range vid, or the
 * error from i40e_vsi_add_vlan().
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_add_vid(struct net_device *netdev,
			 __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid > 4095)
		return -EINVAL;

	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);

	/* If the network stack called us with vid = 0 then
	 * it is asking to receive priority tagged packets with
	 * vlan id 0.  Our HW receives them by default when configured
	 * to receive untagged packets so there is no need to add an
	 * extra filter for vlan 0 tagged packets.
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	/* vid <= 4095 here, so the VLAN_N_VID bound is defensive */
	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}
2526
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: vlan protocol (unused)
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 *
 * Return: always 0; removal failures are only logged.
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
			  __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	/* drop the vid from the restore-on-reset bitmap */
	clear_bit(vid, vsi->active_vlans);

	return 0;
}
2557
2558 /**
2559  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2560  * @vsi: the vsi being brought back up
2561  **/
2562 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2563 {
2564         u16 vid;
2565
2566         if (!vsi->netdev)
2567                 return;
2568
2569         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2570
2571         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2572                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2573                                      vid);
2574 }
2575
2576 /**
2577  * i40e_vsi_add_pvid - Add pvid for the VSI
2578  * @vsi: the vsi being adjusted
2579  * @vid: the vlan id to set as a PVID
2580  **/
2581 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2582 {
2583         struct i40e_vsi_context ctxt;
2584         i40e_status ret;
2585
2586         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2587         vsi->info.pvid = cpu_to_le16(vid);
2588         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2589                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2590                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2591
2592         ctxt.seid = vsi->seid;
2593         ctxt.info = vsi->info;
2594         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2595         if (ret) {
2596                 dev_info(&vsi->back->pdev->dev,
2597                          "add pvid failed, err %s aq_err %s\n",
2598                          i40e_stat_str(&vsi->back->hw, ret),
2599                          i40e_aq_str(&vsi->back->hw,
2600                                      vsi->back->hw.aq.asq_last_status));
2601                 return -ENOENT;
2602         }
2603
2604         return 0;
2605 }
2606
2607 /**
2608  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2609  * @vsi: the vsi being adjusted
2610  *
2611  * Just use the vlan_rx_register() service to put it back to normal
2612  **/
2613 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2614 {
2615         i40e_vlan_stripping_disable(vsi);
2616
2617         vsi->info.pvid = 0;
2618 }
2619
2620 /**
2621  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2622  * @vsi: ptr to the VSI
2623  *
2624  * If this function returns with an error, then it's possible one or
2625  * more of the rings is populated (while the rest are not).  It is the
2626  * callers duty to clean those orphaned rings.
2627  *
2628  * Return 0 on success, negative on failure
2629  **/
2630 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2631 {
2632         int i, err = 0;
2633
2634         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2635                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2636
2637         return err;
2638 }
2639
2640 /**
2641  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2642  * @vsi: ptr to the VSI
2643  *
2644  * Free VSI's transmit software resources
2645  **/
2646 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2647 {
2648         int i;
2649
2650         if (!vsi->tx_rings)
2651                 return;
2652
2653         for (i = 0; i < vsi->num_queue_pairs; i++)
2654                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2655                         i40e_free_tx_resources(vsi->tx_rings[i]);
2656 }
2657
2658 /**
2659  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2660  * @vsi: ptr to the VSI
2661  *
2662  * If this function returns with an error, then it's possible one or
2663  * more of the rings is populated (while the rest are not).  It is the
2664  * callers duty to clean those orphaned rings.
2665  *
2666  * Return 0 on success, negative on failure
2667  **/
2668 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2669 {
2670         int i, err = 0;
2671
2672         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2673                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2674 #ifdef I40E_FCOE
2675         i40e_fcoe_setup_ddp_resources(vsi);
2676 #endif
2677         return err;
2678 }
2679
2680 /**
2681  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2682  * @vsi: ptr to the VSI
2683  *
2684  * Free all receive software resources
2685  **/
2686 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2687 {
2688         int i;
2689
2690         if (!vsi->rx_rings)
2691                 return;
2692
2693         for (i = 0; i < vsi->num_queue_pairs; i++)
2694                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2695                         i40e_free_rx_resources(vsi->rx_rings[i]);
2696 #ifdef I40E_FCOE
2697         i40e_fcoe_free_ddp_resources(vsi);
2698 #endif
2699 }
2700
2701 /**
2702  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2703  * @ring: The Tx ring to configure
2704  *
2705  * This enables/disables XPS for a given Tx descriptor ring
2706  * based on the TCs enabled for the VSI that ring belongs to.
2707  **/
2708 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2709 {
2710         struct i40e_vsi *vsi = ring->vsi;
2711         cpumask_var_t mask;
2712
2713         if (!ring->q_vector || !ring->netdev)
2714                 return;
2715
2716         /* Single TC mode enable XPS */
2717         if (vsi->tc_config.numtc <= 1) {
2718                 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2719                         netif_set_xps_queue(ring->netdev,
2720                                             &ring->q_vector->affinity_mask,
2721                                             ring->queue_index);
2722         } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2723                 /* Disable XPS to allow selection based on TC */
2724                 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2725                 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2726                 free_cpumask_var(mask);
2727         }
2728
2729         /* schedule our worker thread which will take care of
2730          * applying the new filter changes
2731          */
2732         i40e_service_event_schedule(vsi->back);
2733 }
2734
2735 /**
2736  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2737  * @ring: The Tx ring to configure
2738  *
2739  * Configure the Tx descriptor ring in the HMC context.
2740  **/
2741 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2742 {
2743         struct i40e_vsi *vsi = ring->vsi;
2744         u16 pf_q = vsi->base_queue + ring->queue_index;
2745         struct i40e_hw *hw = &vsi->back->hw;
2746         struct i40e_hmc_obj_txq tx_ctx;
2747         i40e_status err = 0;
2748         u32 qtx_ctl = 0;
2749
2750         /* some ATR related tx ring init */
2751         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2752                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2753                 ring->atr_count = 0;
2754         } else {
2755                 ring->atr_sample_rate = 0;
2756         }
2757
2758         /* configure XPS */
2759         i40e_config_xps_tx_ring(ring);
2760
2761         /* clear the context structure first */
2762         memset(&tx_ctx, 0, sizeof(tx_ctx));
2763
2764         tx_ctx.new_context = 1;
2765         tx_ctx.base = (ring->dma / 128);
2766         tx_ctx.qlen = ring->count;
2767         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2768                                                I40E_FLAG_FD_ATR_ENABLED));
2769 #ifdef I40E_FCOE
2770         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2771 #endif
2772         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2773         /* FDIR VSI tx ring can still use RS bit and writebacks */
2774         if (vsi->type != I40E_VSI_FDIR)
2775                 tx_ctx.head_wb_ena = 1;
2776         tx_ctx.head_wb_addr = ring->dma +
2777                               (ring->count * sizeof(struct i40e_tx_desc));
2778
2779         /* As part of VSI creation/update, FW allocates certain
2780          * Tx arbitration queue sets for each TC enabled for
2781          * the VSI. The FW returns the handles to these queue
2782          * sets as part of the response buffer to Add VSI,
2783          * Update VSI, etc. AQ commands. It is expected that
2784          * these queue set handles be associated with the Tx
2785          * queues by the driver as part of the TX queue context
2786          * initialization. This has to be done regardless of
2787          * DCB as by default everything is mapped to TC0.
2788          */
2789         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2790         tx_ctx.rdylist_act = 0;
2791
2792         /* clear the context in the HMC */
2793         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2794         if (err) {
2795                 dev_info(&vsi->back->pdev->dev,
2796                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2797                          ring->queue_index, pf_q, err);
2798                 return -ENOMEM;
2799         }
2800
2801         /* set the context in the HMC */
2802         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2803         if (err) {
2804                 dev_info(&vsi->back->pdev->dev,
2805                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2806                          ring->queue_index, pf_q, err);
2807                 return -ENOMEM;
2808         }
2809
2810         /* Now associate this queue with this PCI function */
2811         if (vsi->type == I40E_VSI_VMDQ2) {
2812                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2813                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2814                            I40E_QTX_CTL_VFVM_INDX_MASK;
2815         } else {
2816                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2817         }
2818
2819         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2820                     I40E_QTX_CTL_PF_INDX_MASK);
2821         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2822         i40e_flush(hw);
2823
2824         /* cache tail off for easier writes later */
2825         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2826
2827         return 0;
2828 }
2829
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 *
 * Return: 0 on success, -ENOMEM if the HMC queue context could not be
 * cleared or written.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pick up the buffer sizing decided at VSI level */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* context fields take the sizes scaled down by the ctx shifts */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* dsize = 0 selects the 16-byte descriptor format, 1 the default */
	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* a non-zero dtype means header-split is in use for this VSI */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* cap the max frame by what a full buffer chain can hold */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* pre-fill the ring with receive buffers */
	if (ring_is_ps_enabled(ring)) {
		i40e_alloc_rx_headers(ring);
		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
	} else {
		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
	}

	return 0;
}
2924
2925 /**
2926  * i40e_vsi_configure_tx - Configure the VSI for Tx
2927  * @vsi: VSI structure describing this set of rings and resources
2928  *
2929  * Configure the Tx VSI for operation.
2930  **/
2931 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2932 {
2933         int err = 0;
2934         u16 i;
2935
2936         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2937                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2938
2939         return err;
2940 }
2941
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 *
 * Derives max_frame and the buffer-split layout (1-buf, packet-split,
 * or split-always) from the PF flags, then programs every Rx ring.
 *
 * Return: 0 on success, or the first error from
 * i40e_configure_rx_ring().
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* frames above the standard MTU need a larger max_frame */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		/* neither (or both) flags set: split always */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
3002
3003 /**
3004  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3005  * @vsi: ptr to the VSI
3006  **/
3007 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3008 {
3009         struct i40e_ring *tx_ring, *rx_ring;
3010         u16 qoffset, qcount;
3011         int i, n;
3012
3013         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3014                 /* Reset the TC information */
3015                 for (i = 0; i < vsi->num_queue_pairs; i++) {
3016                         rx_ring = vsi->rx_rings[i];
3017                         tx_ring = vsi->tx_rings[i];
3018                         rx_ring->dcb_tc = 0;
3019                         tx_ring->dcb_tc = 0;
3020                 }
3021         }
3022
3023         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3024                 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3025                         continue;
3026
3027                 qoffset = vsi->tc_config.tc_info[n].qoffset;
3028                 qcount = vsi->tc_config.tc_info[n].qcount;
3029                 for (i = qoffset; i < (qoffset + qcount); i++) {
3030                         rx_ring = vsi->rx_rings[i];
3031                         tx_ring = vsi->tx_rings[i];
3032                         rx_ring->dcb_tc = n;
3033                         tx_ring->dcb_tc = n;
3034                 }
3035         }
3036 }
3037
3038 /**
3039  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3040  * @vsi: ptr to the VSI
3041  **/
3042 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3043 {
3044         if (vsi->netdev)
3045                 i40e_set_rx_mode(vsi->netdev);
3046 }
3047
3048 /**
3049  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3050  * @vsi: Pointer to the targeted VSI
3051  *
3052  * This function replays the hlist on the hw where all the SB Flow Director
3053  * filters were saved.
3054  **/
3055 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3056 {
3057         struct i40e_fdir_filter *filter;
3058         struct i40e_pf *pf = vsi->back;
3059         struct hlist_node *node;
3060
3061         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3062                 return;
3063
3064         hlist_for_each_entry_safe(filter, node,
3065                                   &pf->fdir_filter_list, fdir_node) {
3066                 i40e_add_del_fdir(vsi, filter, true);
3067         }
3068 }
3069
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies rx-mode, VLAN and DCB settings, then configures the Tx and
 * Rx rings.  Returns 0 on success or the first ring-setup error.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	err = i40e_vsi_configure_tx(vsi);
	if (err)
		return err;

	return i40e_vsi_configure_rx(vsi);
}
3087
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector ITR and rate-limit registers, then builds in
 * hardware the linked list of queue-pair interrupt causes serviced by
 * each MSI-X vector of this VSI.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		/* seed the software ITR state from the VSI settings and
		 * mirror it into the hardware ITR registers
		 */
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			/* Rx cause of queue pair qp: next list entry is
			 * the Tx queue of the same pair
			 */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause chains on to the Rx queue of the next
			 * queue pair (qp + 1)
			 */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
3154
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Enables the "other cause" interrupt sources on ICR0 (admin queue,
 * resets, ECC/HMC/PCI errors, VFLR, and optionally the iWARP and PTP
 * causes depending on PF flags).
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
3192
3193 /**
3194  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3195  * @vsi: the VSI being configured
3196  **/
3197 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3198 {
3199         struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3200         struct i40e_pf *pf = vsi->back;
3201         struct i40e_hw *hw = &pf->hw;
3202         u32 val;
3203
3204         /* set the ITR configuration */
3205         q_vector->itr_countdown = ITR_COUNTDOWN_START;
3206         q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
3207         q_vector->rx.latency_range = I40E_LOW_LATENCY;
3208         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3209         q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
3210         q_vector->tx.latency_range = I40E_LOW_LATENCY;
3211         wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3212
3213         i40e_enable_misc_int_causes(pf);
3214
3215         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3216         wr32(hw, I40E_PFINT_LNKLST0, 0);
3217
3218         /* Associate the queue pair to the vector and enable the queue int */
3219         val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
3220               (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3221               (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3222
3223         wr32(hw, I40E_QINT_RQCTL(0), val);
3224
3225         val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
3226               (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3227               (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3228
3229         wr32(hw, I40E_QINT_TQCTL(0), val);
3230         i40e_flush(hw);
3231 }
3232
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 *
 * Writes PFINT_DYN_CTL0 without the INTENA bit set, masking the
 * "other cause" (misc) interrupt.
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	/* NOTE(review): this uses the DYN_CTLN ITR_INDX shift while
	 * writing the DYN_CTL0 register; the two fields presumably share
	 * a bit position - confirm against i40e_register.h.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
3245
3246 /**
3247  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3248  * @pf: board private structure
3249  **/
3250 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3251 {
3252         struct i40e_hw *hw = &pf->hw;
3253         u32 val;
3254
3255         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3256               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3257               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3258
3259         wr32(hw, I40E_PFINT_DYN_CTL0, val);
3260         i40e_flush(hw);
3261 }
3262
3263 /**
3264  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3265  * @vsi: pointer to a vsi
3266  * @vector: disable a particular Hw Interrupt vector
3267  **/
3268 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3269 {
3270         struct i40e_pf *pf = vsi->back;
3271         struct i40e_hw *hw = &pf->hw;
3272         u32 val;
3273
3274         val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3275         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3276         i40e_flush(hw);
3277 }
3278
3279 /**
3280  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3281  * @irq: interrupt number
3282  * @data: pointer to a q_vector
3283  **/
3284 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3285 {
3286         struct i40e_q_vector *q_vector = data;
3287
3288         if (!q_vector->tx.ring && !q_vector->rx.ring)
3289                 return IRQ_HANDLED;
3290
3291         napi_schedule_irqoff(&q_vector->napi);
3292
3293         return IRQ_HANDLED;
3294 }
3295
3296 /**
3297  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3298  * @vsi: the VSI being configured
3299  * @basename: name for the vector
3300  *
3301  * Allocates MSI-X vectors and requests interrupts from the kernel.
3302  **/
3303 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3304 {
3305         int q_vectors = vsi->num_q_vectors;
3306         struct i40e_pf *pf = vsi->back;
3307         int base = vsi->base_vector;
3308         int rx_int_idx = 0;
3309         int tx_int_idx = 0;
3310         int vector, err;
3311
3312         for (vector = 0; vector < q_vectors; vector++) {
3313                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3314
3315                 if (q_vector->tx.ring && q_vector->rx.ring) {
3316                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3317                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3318                         tx_int_idx++;
3319                 } else if (q_vector->rx.ring) {
3320                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3321                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3322                 } else if (q_vector->tx.ring) {
3323                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3324                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3325                 } else {
3326                         /* skip this unused q_vector */
3327                         continue;
3328                 }
3329                 err = request_irq(pf->msix_entries[base + vector].vector,
3330                                   vsi->irq_handler,
3331                                   0,
3332                                   q_vector->name,
3333                                   q_vector);
3334                 if (err) {
3335                         dev_info(&pf->pdev->dev,
3336                                  "MSIX request_irq failed, error: %d\n", err);
3337                         goto free_queue_irqs;
3338                 }
3339                 /* assign the mask for this irq */
3340                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3341                                       &q_vector->affinity_mask);
3342         }
3343
3344         vsi->irqs_ready = true;
3345         return 0;
3346
3347 free_queue_irqs:
3348         while (vector) {
3349                 vector--;
3350                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3351                                       NULL);
3352                 free_irq(pf->msix_entries[base + vector].vector,
3353                          &(vsi->q_vectors[vector]));
3354         }
3355         return err;
3356 }
3357
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Zeroes the per-queue interrupt cause registers, masks the VSI's
 * interrupt vectors (or ICR0 in MSI/legacy mode), then waits for any
 * in-flight handlers via synchronize_irq().
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* turn off the Tx/Rx queue interrupt causes first */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* DYN_CTLN is indexed by (vector - 1); vector 0 is the
		 * misc/other-causes vector controlled via DYN_CTL0
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3390
3391 /**
3392  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3393  * @vsi: the VSI being configured
3394  **/
3395 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3396 {
3397         struct i40e_pf *pf = vsi->back;
3398         int i;
3399
3400         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3401                 for (i = 0; i < vsi->num_q_vectors; i++)
3402                         i40e_irq_dynamic_enable(vsi, i);
3403         } else {
3404                 i40e_irq_dynamic_enable_icr0(pf);
3405         }
3406
3407         i40e_flush(&pf->hw);
3408         return 0;
3409 }
3410
3411 /**
3412  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3413  * @pf: board private structure
3414  **/
3415 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3416 {
3417         /* Disable ICR 0 */
3418         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3419         i40e_flush(&pf->hw);
3420 }
3421
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure (the dev_id given to request_irq)
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* iWARP critical error: acknowledge it and keep the cause
	 * disabled by dropping it from the re-enable mask
	 */
	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));

		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	/* for each handled "other" cause: flag it for the service task
	 * and drop it from ena_mask so it stays disabled until handled
	 */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		/* record which kind of reset the firmware reports */
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			/* NOTE(review): this clears the ENA_TIMESYNC bit in
			 * icr0; presumably the ENA and status bit positions
			 * match - confirm against i40e_register.h
			 */
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
3555
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the sideband Flow Director Tx ring reclaiming completed
 * filter-programming entries.  Each entry is a descriptor pair: the
 * filter descriptor followed by the data descriptor carrying the raw
 * filter buffer.
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* bias the index negative so reaching zero signals a ring wrap */
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			/* wrapped: rewind to the start of the ring */
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the negative bias before storing the ring index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
3640
3641 /**
3642  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3643  * @irq: interrupt number
3644  * @data: pointer to a q_vector
3645  **/
3646 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3647 {
3648         struct i40e_q_vector *q_vector = data;
3649         struct i40e_vsi *vsi;
3650
3651         if (!q_vector->tx.ring)
3652                 return IRQ_HANDLED;
3653
3654         vsi = q_vector->tx.ring->vsi;
3655         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3656
3657         return IRQ_HANDLED;
3658 }
3659
3660 /**
3661  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3662  * @vsi: the VSI being configured
3663  * @v_idx: vector index
3664  * @qp_idx: queue pair index
3665  **/
3666 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3667 {
3668         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3669         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3670         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3671
3672         tx_ring->q_vector = q_vector;
3673         tx_ring->next = q_vector->tx.ring;
3674         q_vector->tx.ring = tx_ring;
3675         q_vector->tx.count++;
3676
3677         rx_ring->q_vector = q_vector;
3678         rx_ring->next = q_vector->rx.ring;
3679         q_vector->rx.ring = rx_ring;
3680         q_vector->rx.count++;
3681 }
3682
3683 /**
3684  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3685  * @vsi: the VSI being configured
3686  *
3687  * This function maps descriptor rings to the queue-specific vectors
3688  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3689  * one vector per queue pair, but on a constrained vector budget, we
3690  * group the queue pairs as "efficiently" as possible.
3691  **/
3692 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3693 {
3694         int qp_remaining = vsi->num_queue_pairs;
3695         int q_vectors = vsi->num_q_vectors;
3696         int num_ringpairs;
3697         int v_start = 0;
3698         int qp_idx = 0;
3699
3700         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3701          * group them so there are multiple queues per vector.
3702          * It is also important to go through all the vectors available to be
3703          * sure that if we don't use all the vectors, that the remaining vectors
3704          * are cleared. This is especially important when decreasing the
3705          * number of queues in use.
3706          */
3707         for (; v_start < q_vectors; v_start++) {
3708                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3709
3710                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3711
3712                 q_vector->num_ringpairs = num_ringpairs;
3713
3714                 q_vector->rx.count = 0;
3715                 q_vector->tx.count = 0;
3716                 q_vector->rx.ring = NULL;
3717                 q_vector->tx.ring = NULL;
3718
3719                 while (num_ringpairs--) {
3720                         i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3721                         qp_idx++;
3722                         qp_remaining--;
3723                 }
3724         }
3725 }
3726
3727 /**
3728  * i40e_vsi_request_irq - Request IRQ from the OS
3729  * @vsi: the VSI being configured
3730  * @basename: name for the vector
3731  **/
3732 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3733 {
3734         struct i40e_pf *pf = vsi->back;
3735         int err;
3736
3737         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3738                 err = i40e_vsi_request_irq_msix(vsi, basename);
3739         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3740                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3741                                   pf->int_name, pf);
3742         else
3743                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3744                                   pf->int_name, pf);
3745
3746         if (err)
3747                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3748
3749         return err;
3750 }
3751
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() casts its data cookie to struct i40e_pf *
		 * and was registered with pf as dev_id, so hand it pf
		 * here too (the old code passed the netdev pointer).
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
}
#endif
3783
3784 /**
3785  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3786  * @pf: the PF being configured
3787  * @pf_q: the PF queue
3788  * @enable: enable or disable state of the queue
3789  *
3790  * This routine will wait for the given Tx queue of the PF to reach the
3791  * enabled or disabled state.
3792  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3793  * multiple retries; else will return 0 in case of success.
3794  **/
3795 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3796 {
3797         int i;
3798         u32 tx_reg;
3799
3800         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3801                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3802                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3803                         break;
3804
3805                 usleep_range(10, 20);
3806         }
3807         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3808                 return -ETIMEDOUT;
3809
3810         return 0;
3811 }
3812
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Requests the Tx queue enable (or disable) for every queue pair of the
 * VSI and waits for hardware to reach the requested state.  Returns 0 on
 * success, or the error of the first queue whose wait timed out.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		/* wait for any in-flight enable/disable request (REQ bit)
		 * to be acknowledged by hardware (STAT bit) before issuing
		 * a new one
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the queue head position before (re)enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* NOTE(review): revision 0 silicon apparently needs extra settle
	 * time after queue state changes -- confirm against the datasheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
3871
3872 /**
3873  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3874  * @pf: the PF being configured
3875  * @pf_q: the PF queue
3876  * @enable: enable or disable state of the queue
3877  *
3878  * This routine will wait for the given Rx queue of the PF to reach the
3879  * enabled or disabled state.
3880  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3881  * multiple retries; else will return 0 in case of success.
3882  **/
3883 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3884 {
3885         int i;
3886         u32 rx_reg;
3887
3888         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3889                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3890                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3891                         break;
3892
3893                 usleep_range(10, 20);
3894         }
3895         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3896                 return -ETIMEDOUT;
3897
3898         return 0;
3899 }
3900
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Requests the Rx queue enable (or disable) for every queue pair of the
 * VSI and waits for hardware to reach the requested state.  Returns 0 on
 * success, or the error of the first queue whose wait timed out.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any in-flight enable/disable request (REQ bit)
		 * to be acknowledged by hardware (STAT bit) before issuing
		 * a new one
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
3946
3947 /**
3948  * i40e_vsi_control_rings - Start or stop a VSI's rings
3949  * @vsi: the VSI being configured
3950  * @enable: start or stop the rings
3951  **/
3952 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3953 {
3954         int ret = 0;
3955
3956         /* do rx first for enable and last for disable */
3957         if (request) {
3958                 ret = i40e_vsi_control_rx(vsi, request);
3959                 if (ret)
3960                         return ret;
3961                 ret = i40e_vsi_control_tx(vsi, request);
3962         } else {
3963                 /* Ignore return value, we need to shutdown whatever we can */
3964                 i40e_vsi_control_tx(vsi, request);
3965                 i40e_vsi_control_rx(vsi, request);
3966         }
3967
3968         return ret;
3969 }
3970
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the IRQ(s) the VSI requested and clears the hardware
 * interrupt linked-list and cause-enable registers so no queue remains
 * chained to a freed vector.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* nothing to free if vectors were never allocated/requested */
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the queue chain starting at the saved head,
			 * unhooking each queue's interrupt cause as we go
			 */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				/* drop MSI-X routing and cause-enable bits
				 * for the Rx queue
				 */
				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* remember the next queue in the chain
				 * before the field is overwritten below
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* legacy/MSI mode: single IRQ, single linked list (LNKLST0) */
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
4085
4086 /**
4087  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4088  * @vsi: the VSI being configured
4089  * @v_idx: Index of vector to be freed
4090  *
4091  * This function frees the memory allocated to the q_vector.  In addition if
4092  * NAPI is enabled it will delete any references to the NAPI struct prior
4093  * to freeing the q_vector.
4094  **/
4095 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4096 {
4097         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4098         struct i40e_ring *ring;
4099
4100         if (!q_vector)
4101                 return;
4102
4103         /* disassociate q_vector from rings */
4104         i40e_for_each_ring(ring, q_vector->tx)
4105                 ring->q_vector = NULL;
4106
4107         i40e_for_each_ring(ring, q_vector->rx)
4108                 ring->q_vector = NULL;
4109
4110         /* only VSI w/ an associated netdev is set up w/ NAPI */
4111         if (vsi->netdev)
4112                 netif_napi_del(&q_vector->napi);
4113
4114         vsi->q_vectors[v_idx] = NULL;
4115
4116         kfree_rcu(q_vector, rcu);
4117 }
4118
4119 /**
4120  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4121  * @vsi: the VSI being un-configured
4122  *
4123  * This frees the memory allocated to the q_vectors and
4124  * deletes references to the NAPI struct.
4125  **/
4126 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4127 {
4128         int v_idx;
4129
4130         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4131                 i40e_free_q_vector(vsi, v_idx);
4132 }
4133
4134 /**
4135  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4136  * @pf: board private structure
4137  **/
4138 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4139 {
4140         /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4141         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4142                 pci_disable_msix(pf->pdev);
4143                 kfree(pf->msix_entries);
4144                 pf->msix_entries = NULL;
4145                 kfree(pf->irq_pile);
4146                 pf->irq_pile = NULL;
4147         } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4148                 pci_disable_msi(pf->pdev);
4149         }
4150         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4151 }
4152
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	/* stop the misc vector before freeing its IRQ */
	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* entry 0 is the misc/"other causes" vector */
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* release the whole IRQ tracking pile, then tear down each
	 * VSI's q_vectors before dropping MSI-X/MSI capability
	 */
	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
4176
4177 /**
4178  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4179  * @vsi: the VSI being configured
4180  **/
4181 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4182 {
4183         int q_idx;
4184
4185         if (!vsi->netdev)
4186                 return;
4187
4188         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4189                 napi_enable(&vsi->q_vectors[q_idx]->napi);
4190 }
4191
4192 /**
4193  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4194  * @vsi: the VSI being configured
4195  **/
4196 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4197 {
4198         int q_idx;
4199
4200         if (!vsi->netdev)
4201                 return;
4202
4203         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4204                 napi_disable(&vsi->q_vectors[q_idx]->napi);
4205 }
4206
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down (if not already down), releases its IRQs and
 * frees the Tx/Rx ring resources.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	/* DOWN bit guards against calling i40e_down() twice */
	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
}
4220
4221 /**
4222  * i40e_quiesce_vsi - Pause a given VSI
4223  * @vsi: the VSI being paused
4224  **/
4225 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4226 {
4227         if (test_bit(__I40E_DOWN, &vsi->state))
4228                 return;
4229
4230         /* No need to disable FCoE VSI when Tx suspended */
4231         if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4232             vsi->type == I40E_VSI_FCOE) {
4233                 dev_dbg(&vsi->back->pdev->dev,
4234                          "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4235                 return;
4236         }
4237
4238         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4239         if (vsi->netdev && netif_running(vsi->netdev))
4240                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4241         else
4242                 i40e_vsi_close(vsi);
4243 }
4244
4245 /**
4246  * i40e_unquiesce_vsi - Resume a given VSI
4247  * @vsi: the VSI being resumed
4248  **/
4249 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4250 {
4251         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4252                 return;
4253
4254         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4255         if (vsi->netdev && netif_running(vsi->netdev))
4256                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4257         else
4258                 i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4259 }
4260
4261 /**
4262  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4263  * @pf: the PF
4264  **/
4265 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4266 {
4267         int v;
4268
4269         for (v = 0; v < pf->num_alloc_vsi; v++) {
4270                 if (pf->vsi[v])
4271                         i40e_quiesce_vsi(pf->vsi[v]);
4272         }
4273 }
4274
4275 /**
4276  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4277  * @pf: the PF
4278  **/
4279 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4280 {
4281         int v;
4282
4283         for (v = 0; v < pf->num_alloc_vsi; v++) {
4284                 if (pf->vsi[v])
4285                         i40e_unquiesce_vsi(pf->vsi[v]);
4286         }
4287 }
4288
4289 #ifdef CONFIG_I40E_DCB
4290 /**
4291  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4292  * @vsi: the VSI being configured
4293  *
4294  * This function waits for the given VSI's Tx queues to be disabled.
4295  **/
4296 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4297 {
4298         struct i40e_pf *pf = vsi->back;
4299         int i, pf_q, ret;
4300
4301         pf_q = vsi->base_queue;
4302         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4303                 /* Check and wait for the disable status of the queue */
4304                 ret = i40e_pf_txq_wait(pf, pf_q, false);
4305                 if (ret) {
4306                         dev_info(&pf->pdev->dev,
4307                                  "VSI seid %d Tx ring %d disable timeout\n",
4308                                  vsi->seid, pf_q);
4309                         return ret;
4310                 }
4311         }
4312
4313         return 0;
4314 }
4315
4316 /**
4317  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4318  * @pf: the PF
4319  *
4320  * This function waits for the Tx queues to be in disabled state for all the
4321  * VSIs that are managed by this PF.
4322  **/
4323 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4324 {
4325         int v, ret = 0;
4326
4327         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4328                 /* No need to wait for FCoE VSI queues */
4329                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4330                         ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4331                         if (ret)
4332                                 break;
4333                 }
4334         }
4335
4336         return ret;
4337 }
4338
4339 #endif
4340
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks the specified queue of the given VSI and detects a
 * hung condition.  Detection is a two step process: on the first pass this
 * function sets the 'hung' bit.  Before the next run of the service task,
 * napi_poll should clear that bit for the q_vector; if the bit is still
 * set on the subsequent run, this function issues a SW interrupt (forced
 * writeback) to recover from the hung condition.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf	*pf;
	u32 head, val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	/* no matching ring with descriptors -- nothing to check */
	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	/* Bail out if interrupts are disabled because napi_poll
	 * execution in-progress or will get scheduled soon.
	 * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
	 */
	if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
		return;

	head = i40e_get_head(tx_ring);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* HW is done executing descriptors, updated HEAD write back,
	 * but SW hasn't processed those descriptors. If interrupt is
	 * not generated from this point ON, it could result into
	 * dev_watchdog detecting timeout on those netdev_queue,
	 * hence proactively trigger SW interrupt.
	 */
	if (tx_pending) {
		/* NAPI Poll didn't run and clear since it was set */
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected)) {
			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
				    vsi->seid, q_idx, tx_pending,
				    tx_ring->next_to_clean, head,
				    tx_ring->next_to_use,
				    readl(tx_ring->tail));
			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
				    vsi->seid, q_idx, val);
			i40e_force_wb(vsi, tx_ring->q_vector);
		} else {
			/* First Chance - detected possible hung */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
		}
	}
}
4418
4419 /**
4420  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4421  * @pf:  pointer to PF struct
4422  *
4423  * LAN VSI has netdev and netdev has TX queues. This function is to check
4424  * each of those TX queues if they are hung, trigger recovery by issuing
4425  * SW interrupt.
4426  **/
4427 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4428 {
4429         struct net_device *netdev;
4430         struct i40e_vsi *vsi;
4431         int i;
4432
4433         /* Only for LAN VSI */
4434         vsi = pf->vsi[pf->lan_vsi];
4435
4436         if (!vsi)
4437                 return;
4438
4439         /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4440         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4441             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4442                 return;
4443
4444         /* Make sure type is MAIN VSI */
4445         if (vsi->type != I40E_VSI_MAIN)
4446                 return;
4447
4448         netdev = vsi->netdev;
4449         if (!netdev)
4450                 return;
4451
4452         /* Bail out if netif_carrier is not OK */
4453         if (!netif_carrier_ok(netdev))
4454                 return;
4455
4456         /* Go thru' TX queues for netdev */
4457         for (i = 0; i < netdev->num_tx_queues; i++) {
4458                 struct netdev_queue *q;
4459
4460                 q = netdev_get_tx_queue(netdev, i);
4461                 if (q)
4462                         i40e_detect_recover_hung_queue(i, vsi);
4463         }
4464 }
4465
4466 /**
4467  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4468  * @pf: pointer to PF
4469  *
4470  * Get TC map for ISCSI PF type that will include iSCSI TC
4471  * and LAN TC.
4472  **/
4473 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4474 {
4475         struct i40e_dcb_app_priority_table app;
4476         struct i40e_hw *hw = &pf->hw;
4477         u8 enabled_tc = 1; /* TC0 is always enabled */
4478         u8 tc, i;
4479         /* Get the iSCSI APP TLV */
4480         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4481
4482         for (i = 0; i < dcbcfg->numapps; i++) {
4483                 app = dcbcfg->app[i];
4484                 if (app.selector == I40E_APP_SEL_TCPIP &&
4485                     app.protocolid == I40E_APP_PROTOID_ISCSI) {
4486                         tc = dcbcfg->etscfg.prioritytable[app.priority];
4487                         enabled_tc |= BIT(tc);
4488                         break;
4489                 }
4490         }
4491
4492         return enabled_tc;
4493 }
4494
4495 /**
4496  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4497  * @dcbcfg: the corresponding DCBx configuration structure
4498  *
4499  * Return the number of TCs from given DCBx configuration
4500  **/
4501 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4502 {
4503         u8 num_tc = 0;
4504         int i;
4505
4506         /* Scan the ETS Config Priority Table to find
4507          * traffic class enabled for a given priority
4508          * and use the traffic class index to get the
4509          * number of traffic classes enabled
4510          */
4511         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4512                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4513                         num_tc = dcbcfg->etscfg.prioritytable[i];
4514         }
4515
4516         /* Traffic class index starts from zero so
4517          * increment to return the actual count
4518          */
4519         return num_tc + 1;
4520 }
4521
4522 /**
4523  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4524  * @dcbcfg: the corresponding DCBx configuration structure
4525  *
4526  * Query the current DCB configuration and return the number of
4527  * traffic classes enabled from the given DCBX config
4528  **/
4529 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4530 {
4531         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4532         u8 enabled_tc = 1;
4533         u8 i;
4534
4535         for (i = 0; i < num_tc; i++)
4536                 enabled_tc |= BIT(i);
4537
4538         return enabled_tc;
4539 }
4540
4541 /**
4542  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4543  * @pf: PF being queried
4544  *
4545  * Return number of traffic classes enabled for the given PF
4546  **/
4547 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4548 {
4549         struct i40e_hw *hw = &pf->hw;
4550         u8 i, enabled_tc;
4551         u8 num_tc = 0;
4552         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4553
4554         /* If DCB is not enabled then always in single TC */
4555         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4556                 return 1;
4557
4558         /* SFP mode will be enabled for all TCs on port */
4559         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4560                 return i40e_dcb_get_num_tc(dcbcfg);
4561
4562         /* MFP mode return count of enabled TCs for this PF */
4563         if (pf->hw.func_caps.iscsi)
4564                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4565         else
4566                 return 1; /* Only TC0 */
4567
4568         /* At least have TC0 */
4569         enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4570         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4571                 if (enabled_tc & BIT(i))
4572                         num_tc++;
4573         }
4574         return num_tc;
4575 }
4576
4577 /**
4578  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4579  * @pf: PF being queried
4580  *
4581  * Return a bitmap for first enabled traffic class for this PF.
4582  **/
4583 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4584 {
4585         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4586         u8 i = 0;
4587
4588         if (!enabled_tc)
4589                 return 0x1; /* TC0 */
4590
4591         /* Find the first enabled TC */
4592         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4593                 if (enabled_tc & BIT(i))
4594                         break;
4595         }
4596
4597         return BIT(i);
4598 }
4599
4600 /**
4601  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4602  * @pf: PF being queried
4603  *
4604  * Return a bitmap for enabled traffic classes for this PF.
4605  **/
4606 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4607 {
4608         /* If DCB is not enabled for this PF then just return default TC */
4609         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4610                 return i40e_pf_get_default_tc(pf);
4611
4612         /* SFP mode we want PF to be enabled for all TCs */
4613         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4614                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4615
4616         /* MFP enabled and iSCSI PF type */
4617         if (pf->hw.func_caps.iscsi)
4618                 return i40e_get_iscsi_tc_map(pf);
4619         else
4620                 return i40e_pf_get_default_tc(pf);
4621 }
4622
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries the VSI-level and the per-TC (ETS) bandwidth configuration
 * from the firmware via two AdminQ commands and caches the results in
 * the VSI structure.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* The two replies should agree on which TCs are valid; a
	 * mismatch is only logged, not treated as fatal.
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* tc_bw_max arrives as two little-endian 16-bit words; combine
	 * them into one 32-bit value holding a 4-bit field per TC.
	 */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4682
4683 /**
4684  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4685  * @vsi: the VSI being configured
4686  * @enabled_tc: TC bitmap
4687  * @bw_credits: BW shared credits per TC
4688  *
4689  * Returns 0 on success, negative value on failure
4690  **/
4691 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4692                                        u8 *bw_share)
4693 {
4694         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4695         i40e_status ret;
4696         int i;
4697
4698         bw_data.tc_valid_bits = enabled_tc;
4699         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4700                 bw_data.tc_bw_credits[i] = bw_share[i];
4701
4702         ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4703                                        NULL);
4704         if (ret) {
4705                 dev_info(&vsi->back->pdev->dev,
4706                          "AQ command Config VSI BW allocation per TC failed = %d\n",
4707                          vsi->back->hw.aq.asq_last_status);
4708                 return -EINVAL;
4709         }
4710
4711         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4712                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4713
4714         return 0;
4715 }
4716
4717 /**
4718  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4719  * @vsi: the VSI being configured
4720  * @enabled_tc: TC map to be enabled
4721  *
4722  **/
4723 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4724 {
4725         struct net_device *netdev = vsi->netdev;
4726         struct i40e_pf *pf = vsi->back;
4727         struct i40e_hw *hw = &pf->hw;
4728         u8 netdev_tc = 0;
4729         int i;
4730         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4731
4732         if (!netdev)
4733                 return;
4734
4735         if (!enabled_tc) {
4736                 netdev_reset_tc(netdev);
4737                 return;
4738         }
4739
4740         /* Set up actual enabled TCs on the VSI */
4741         if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4742                 return;
4743
4744         /* set per TC queues for the VSI */
4745         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4746                 /* Only set TC queues for enabled tcs
4747                  *
4748                  * e.g. For a VSI that has TC0 and TC3 enabled the
4749                  * enabled_tc bitmap would be 0x00001001; the driver
4750                  * will set the numtc for netdev as 2 that will be
4751                  * referenced by the netdev layer as TC 0 and 1.
4752                  */
4753                 if (vsi->tc_config.enabled_tc & BIT(i))
4754                         netdev_set_tc_queue(netdev,
4755                                         vsi->tc_config.tc_info[i].netdev_tc,
4756                                         vsi->tc_config.tc_info[i].qcount,
4757                                         vsi->tc_config.tc_info[i].qoffset);
4758         }
4759
4760         /* Assign UP2TC map for the VSI */
4761         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4762                 /* Get the actual TC# for the UP */
4763                 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4764                 /* Get the mapped netdev TC# for the UP */
4765                 netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4766                 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4767         }
4768 }
4769
4770 /**
4771  * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4772  * @vsi: the VSI being configured
4773  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4774  **/
4775 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4776                                       struct i40e_vsi_context *ctxt)
4777 {
4778         /* copy just the sections touched not the entire info
4779          * since not all sections are valid as returned by
4780          * update vsi params
4781          */
4782         vsi->info.mapping_flags = ctxt->info.mapping_flags;
4783         memcpy(&vsi->info.queue_mapping,
4784                &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4785         memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4786                sizeof(vsi->info.tc_mapping));
4787 }
4788
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs.
	 * Only the fields used by the update-VSI AQ command are filled
	 * in here; i40e_vsi_setup_queue_map() populates the queue/TC
	 * mapping sections of ctxt.info.
	 */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4865
4866 /**
4867  * i40e_veb_config_tc - Configure TCs for given VEB
4868  * @veb: given VEB
4869  * @enabled_tc: TC bitmap
4870  *
4871  * Configures given TC bitmap for VEB (switching) element
4872  **/
4873 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4874 {
4875         struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4876         struct i40e_pf *pf = veb->pf;
4877         int ret = 0;
4878         int i;
4879
4880         /* No TCs or already enabled TCs just return */
4881         if (!enabled_tc || veb->enabled_tc == enabled_tc)
4882                 return ret;
4883
4884         bw_data.tc_valid_bits = enabled_tc;
4885         /* bw_data.absolute_credits is not set (relative) */
4886
4887         /* Enable ETS TCs with equal BW Share for now */
4888         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4889                 if (enabled_tc & BIT(i))
4890                         bw_data.tc_bw_share_credits[i] = 1;
4891         }
4892
4893         ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4894                                                    &bw_data, NULL);
4895         if (ret) {
4896                 dev_info(&pf->pdev->dev,
4897                          "VEB bw config failed, err %s aq_err %s\n",
4898                          i40e_stat_str(&pf->hw, ret),
4899                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4900                 goto out;
4901         }
4902
4903         /* Update the BW information */
4904         ret = i40e_veb_get_bw_info(veb);
4905         if (ret) {
4906                 dev_info(&pf->pdev->dev,
4907                          "Failed getting veb bw config, err %s aq_err %s\n",
4908                          i40e_stat_str(&pf->hw, ret),
4909                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4910         }
4911
4912 out:
4913         return ret;
4914 }
4915
4916 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4980
4981 /**
4982  * i40e_resume_port_tx - Resume port Tx
4983  * @pf: PF struct
4984  *
4985  * Resume a port's Tx and issue a PF reset in case of failure to
4986  * resume.
4987  **/
4988 static int i40e_resume_port_tx(struct i40e_pf *pf)
4989 {
4990         struct i40e_hw *hw = &pf->hw;
4991         int ret;
4992
4993         ret = i40e_aq_resume_port_tx(hw, NULL);
4994         if (ret) {
4995                 dev_info(&pf->pdev->dev,
4996                          "Resume Port Tx failed, err %s aq_err %s\n",
4997                           i40e_stat_str(&pf->hw, ret),
4998                           i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4999                 /* Schedule PF reset to recover */
5000                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5001                 i40e_service_event_schedule(pf);
5002         }
5003
5004         return ret;
5005 }
5006
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 *
 * Returns 0 on success (including when DCB init is skipped for old
 * firmware), negative value on failure.
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
5058 #endif /* CONFIG_I40E_DCB */
5059 #define SPEED_SIZE 14
5060 #define FC_SIZE 8
5061 /**
5062  * i40e_print_link_message - print link up or down
5063  * @vsi: the VSI for which link needs a message
5064  */
5065 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5066 {
5067         char *speed = "Unknown";
5068         char *fc = "Unknown";
5069
5070         if (vsi->current_isup == isup)
5071                 return;
5072         vsi->current_isup = isup;
5073         if (!isup) {
5074                 netdev_info(vsi->netdev, "NIC Link is Down\n");
5075                 return;
5076         }
5077
5078         /* Warn user if link speed on NPAR enabled partition is not at
5079          * least 10GB
5080          */
5081         if (vsi->back->hw.func_caps.npar_enable &&
5082             (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5083              vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5084                 netdev_warn(vsi->netdev,
5085                             "The partition detected link speed that is less than 10Gbps\n");
5086
5087         switch (vsi->back->hw.phy.link_info.link_speed) {
5088         case I40E_LINK_SPEED_40GB:
5089                 speed = "40 G";
5090                 break;
5091         case I40E_LINK_SPEED_20GB:
5092                 speed = "20 G";
5093                 break;
5094         case I40E_LINK_SPEED_10GB:
5095                 speed = "10 G";
5096                 break;
5097         case I40E_LINK_SPEED_1GB:
5098                 speed = "1000 M";
5099                 break;
5100         case I40E_LINK_SPEED_100MB:
5101                 speed = "100 M";
5102                 break;
5103         default:
5104                 break;
5105         }
5106
5107         switch (vsi->back->hw.fc.current_mode) {
5108         case I40E_FC_FULL:
5109                 fc = "RX/TX";
5110                 break;
5111         case I40E_FC_TX_PAUSE:
5112                 fc = "TX";
5113                 break;
5114         case I40E_FC_RX_PAUSE:
5115                 fc = "RX";
5116                 break;
5117         default:
5118                 fc = "None";
5119                 break;
5120         }
5121
5122         netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5123                     speed, fc);
5124 }
5125
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Configures interrupts, starts the rings, enables NAPI and IRQs, and
 * reports link state.  For the FDIR VSI, previously added sideband
 * filters are replayed.
 *
 * Returns 0 on success, negative value if starting the rings fails.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here: media present
		 * but link down with an unqualified module is reported.
		 */
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		/* TCP/IPv4 sideband rules and ATR are mutually exclusive,
		 * so ATR is forced off while such rules exist.
		 */
		if (pf->fd_tcp_rule > 0) {
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}
	i40e_service_event_schedule(pf);

	return 0;
}
5181
5182 /**
5183  * i40e_vsi_reinit_locked - Reset the VSI
5184  * @vsi: the VSI being configured
5185  *
5186  * Rebuild the ring structs after some configuration
5187  * has changed, e.g. MTU size.
5188  **/
5189 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5190 {
5191         struct i40e_pf *pf = vsi->back;
5192
5193         WARN_ON(in_interrupt());
5194         while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5195                 usleep_range(1000, 2000);
5196         i40e_down(vsi);
5197
5198         /* Give a VF some time to respond to the reset.  The
5199          * two second wait is based upon the watchdog cycle in
5200          * the VF driver.
5201          */
5202         if (vsi->type == I40E_VSI_SRIOV)
5203                 msleep(2000);
5204         i40e_up(vsi);
5205         clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5206 }
5207
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
5222
5223 /**
5224  * i40e_down - Shutdown the connection processing
5225  * @vsi: the VSI being stopped
5226  **/
5227 void i40e_down(struct i40e_vsi *vsi)
5228 {
5229         int i;
5230
5231         /* It is assumed that the caller of this function
5232          * sets the vsi->state __I40E_DOWN bit.
5233          */
5234         if (vsi->netdev) {
5235                 netif_carrier_off(vsi->netdev);
5236                 netif_tx_disable(vsi->netdev);
5237         }
5238         i40e_vsi_disable_irq(vsi);
5239         i40e_vsi_control_rings(vsi, false);
5240         i40e_napi_disable_all(vsi);
5241
5242         for (i = 0; i < vsi->num_queue_pairs; i++) {
5243                 i40e_clean_tx_ring(vsi->tx_rings[i]);
5244                 i40e_clean_rx_ring(vsi->rx_rings[i]);
5245         }
5246 }
5247
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Validates that DCB is enabled, MFP is not, and the requested count
 * does not exceed the TCs enabled on the link, then quiesces the VSI
 * and reprograms its TC configuration.
 *
 * Returns 0 on success, negative value on failure.
 **/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
5309
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings: the three
	 * TSOMSK registers are programmed from TCP flag masks shifted
	 * into the registers' expected position.
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	/* ask the tunnel drivers to replay their known UDP ports so the
	 * hardware offload tables can be repopulated
	 */
#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif
#ifdef CONFIG_I40E_GENEVE
	geneve_get_rx_port(netdev);
#endif

	return 0;
}
5357
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Allocates Tx/Rx descriptor resources, configures the VSI, requests
 * the IRQ (named differently for netdev vs FDIR VSIs), and completes
 * bring-up.  On failure the steps are unwound in reverse order; a
 * failure on the LAN VSI additionally requests a PF reset.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		/* Only netdev-backed and FDIR VSIs may be opened here */
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
5432
5433 /**
5434  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5435  * @pf: Pointer to PF
5436  *
5437  * This function destroys the hlist where all the Flow Director
5438  * filters were saved.
5439  **/
5440 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5441 {
5442         struct i40e_fdir_filter *filter;
5443         struct hlist_node *node2;
5444
5445         hlist_for_each_entry_safe(filter, node2,
5446                                   &pf->fdir_filter_list, fdir_node) {
5447                 hlist_del(&filter->fdir_node);
5448                 kfree(filter);
5449         }
5450         pf->fdir_pf_active_filters = 0;
5451 }
5452
5453 /**
5454  * i40e_close - Disables a network interface
5455  * @netdev: network interface device structure
5456  *
5457  * The close entry point is called when an interface is de-activated
5458  * by the OS.  The hardware is still under the driver's control, but
5459  * this netdev interface is disabled.
5460  *
5461  * Returns 0, this is not allowed to fail
5462  **/
5463 #ifdef I40E_FCOE
5464 int i40e_close(struct net_device *netdev)
5465 #else
5466 static int i40e_close(struct net_device *netdev)
5467 #endif
5468 {
5469         struct i40e_netdev_priv *np = netdev_priv(netdev);
5470         struct i40e_vsi *vsi = np->vsi;
5471
5472         i40e_vsi_close(vsi);
5473
5474         return 0;
5475 }
5476
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested (BIT_ULL of __I40E_*_REQUESTED)
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single "biggest" reset present in @reset_flags is performed;
 * lesser requests in the same mask are implied by the bigger one.  The
 * callers visible in this file (i40e_reset_subtask, i40e_do_reset_safe)
 * hold the rtnl lock around this call.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	/* the down/quiesce paths below may sleep; never run from IRQ */
	WARN_ON(in_interrupt());

	/* give the VFs advance warning, but only if the AQ is still alive */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				/* mark DOWN before i40e_down so other paths
				 * see the VSI as already going down
				 */
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
5571
5572 #ifdef CONFIG_I40E_DCB
5573 /**
5574  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5575  * @pf: board private structure
5576  * @old_cfg: current DCB config
5577  * @new_cfg: new DCB config
5578  **/
5579 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5580                             struct i40e_dcbx_config *old_cfg,
5581                             struct i40e_dcbx_config *new_cfg)
5582 {
5583         bool need_reconfig = false;
5584
5585         /* Check if ETS configuration has changed */
5586         if (memcmp(&new_cfg->etscfg,
5587                    &old_cfg->etscfg,
5588                    sizeof(new_cfg->etscfg))) {
5589                 /* If Priority Table has changed reconfig is needed */
5590                 if (memcmp(&new_cfg->etscfg.prioritytable,
5591                            &old_cfg->etscfg.prioritytable,
5592                            sizeof(new_cfg->etscfg.prioritytable))) {
5593                         need_reconfig = true;
5594                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5595                 }
5596
5597                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5598                            &old_cfg->etscfg.tcbwtable,
5599                            sizeof(new_cfg->etscfg.tcbwtable)))
5600                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5601
5602                 if (memcmp(&new_cfg->etscfg.tsatable,
5603                            &old_cfg->etscfg.tsatable,
5604                            sizeof(new_cfg->etscfg.tsatable)))
5605                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5606         }
5607
5608         /* Check if PFC configuration has changed */
5609         if (memcmp(&new_cfg->pfc,
5610                    &old_cfg->pfc,
5611                    sizeof(new_cfg->pfc))) {
5612                 need_reconfig = true;
5613                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5614         }
5615
5616         /* Check if APP Table has changed */
5617         if (memcmp(&new_cfg->app,
5618                    &old_cfg->app,
5619                    sizeof(new_cfg->app))) {
5620                 need_reconfig = true;
5621                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5622         }
5623
5624         dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5625         return need_reconfig;
5626 }
5627
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Refreshes the cached DCBX configuration from firmware.  When the
 * local config actually changed, all VSIs are quiesced, the DCB config
 * is reapplied to the switch elements and traffic is resumed.  Returns
 * 0 when the event was ignored or handled cleanly, otherwise the status
 * from the firmware query / port-Tx resume that failed.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration so we can detect what changed */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	/* drop dcbnl app entries that are gone from the new config */
	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's Tx queues to be disabled; if that fails,
	 * fall back to a full PF reset via the service task.
	 */
	ret = i40e_pf_wait_txq_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
	}

exit:
	return ret;
}
5730 #endif /* CONFIG_I40E_DCB */
5731
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in the rtnl lock so resets requested from
 * outside the service task serialize with netdev operations.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
5744
5745 /**
5746  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5747  * @pf: board private structure
5748  * @e: event info posted on ARQ
5749  *
5750  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5751  * and VF queues
5752  **/
5753 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5754                                            struct i40e_arq_event_info *e)
5755 {
5756         struct i40e_aqc_lan_overflow *data =
5757                 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5758         u32 queue = le32_to_cpu(data->prtdcb_rupto);
5759         u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5760         struct i40e_hw *hw = &pf->hw;
5761         struct i40e_vf *vf;
5762         u16 vf_id;
5763
5764         dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5765                 queue, qtx_ctl);
5766
5767         /* Queue belongs to VF, find the VF and issue VF reset */
5768         if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5769             >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5770                 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5771                          >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5772                 vf_id -= hw->func_caps.vf_base_id;
5773                 vf = &pf->vf[vf_id];
5774                 i40e_vc_notify_vf_reset(vf);
5775                 /* Allow VF to process pending reset notification */
5776                 msleep(20);
5777                 i40e_reset_vf(vf, false);
5778         }
5779 }
5780
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears __I40E_SERVICE_SCHED so the service task can be scheduled
 * again; warns if called when no service event was actually pending.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5793
5794 /**
5795  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5796  * @pf: board private structure
5797  **/
5798 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5799 {
5800         u32 val, fcnt_prog;
5801
5802         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5803         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5804         return fcnt_prog;
5805 }
5806
5807 /**
5808  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5809  * @pf: board private structure
5810  **/
5811 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5812 {
5813         u32 val, fcnt_prog;
5814
5815         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5816         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5817                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5818                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5819         return fcnt_prog;
5820 }
5821
5822 /**
5823  * i40e_get_global_fd_count - Get total FD filters programmed on device
5824  * @pf: board private structure
5825  **/
5826 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5827 {
5828         u32 val, fcnt_prog;
5829
5830         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5831         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5832                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5833                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5834         return fcnt_prog;
5835 }
5836
/**
 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
 * @pf: board private structure
 *
 * Re-arms Flow Director sideband (SB) and/or ATR once the filter table
 * has enough free space again, and prunes the filter that HW reported
 * as invalid (pf->fd_inv) from the software list.
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	/* a table flush is pending; counts would be stale */
	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if, FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	/* NOTE(review): this is an OR of three conditions, so SB is also
	 * re-enabled when there were no recent add errors or when fewer
	 * ATR filters are in use than before -- confirm the eagerness is
	 * intended rather than an AND.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR
	 * (ATR needs twice the SB headroom before it comes back)
	 */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
5887
5888 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5889 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the entire hardware FD filter table via PFQF_CTL_1, polls for
 * the flush to finish, then re-programs the saved sideband filters.
 * ATR is kept disabled when flushes happen too frequently while the
 * table is mostly filled by SB rules.  Rate-limited by
 * I40E_MIN_FD_FLUSH_INTERVAL seconds between flushes.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	/* don't flush more often than once per interval */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	/* reg is always assigned: the do-while body runs at least once */
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}

}
5951
5952 /**
5953  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5954  * @pf: board private structure
5955  **/
5956 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5957 {
5958         return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5959 }
5960
5961 /* We can see up to 256 filter programming desc in transit if the filters are
5962  * being applied really fast; before we see the first
5963  * filter miss error on Rx queue 0. Accumulating enough error messages before
5964  * reacting will make sure we don't cause flush too often.
5965  */
5966 #define I40E_MAX_FD_PROGRAM_ERROR 256
5967
5968 /**
5969  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5970  * @pf: board private structure
5971  **/
5972 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5973 {
5974
5975         /* if interface is down do nothing */
5976         if (test_bit(__I40E_DOWN, &pf->state))
5977                 return;
5978
5979         if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5980                 return;
5981
5982         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5983                 i40e_fdir_flush_and_replay(pf);
5984
5985         i40e_fdir_check_and_reenable(pf);
5986
5987 }
5988
5989 /**
5990  * i40e_vsi_link_event - notify VSI of a link event
5991  * @vsi: vsi to be notified
5992  * @link_up: link up or down
5993  **/
5994 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5995 {
5996         if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5997                 return;
5998
5999         switch (vsi->type) {
6000         case I40E_VSI_MAIN:
6001 #ifdef I40E_FCOE
6002         case I40E_VSI_FCOE:
6003 #endif
6004                 if (!vsi->netdev || !vsi->netdev_registered)
6005                         break;
6006
6007                 if (link_up) {
6008                         netif_carrier_on(vsi->netdev);
6009                         netif_tx_wake_all_queues(vsi->netdev);
6010                 } else {
6011                         netif_carrier_off(vsi->netdev);
6012                         netif_tx_stop_all_queues(vsi->netdev);
6013                 }
6014                 break;
6015
6016         case I40E_VSI_SRIOV:
6017         case I40E_VSI_VMDQ2:
6018         case I40E_VSI_CTRL:
6019         case I40E_VSI_MIRROR:
6020         default:
6021                 /* there is no notification for other VSIs */
6022                 break;
6023         }
6024 }
6025
6026 /**
6027  * i40e_veb_link_event - notify elements on the veb of a link event
6028  * @veb: veb to be notified
6029  * @link_up: link up or down
6030  **/
6031 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6032 {
6033         struct i40e_pf *pf;
6034         int i;
6035
6036         if (!veb || !veb->pf)
6037                 return;
6038         pf = veb->pf;
6039
6040         /* depth first... */
6041         for (i = 0; i < I40E_MAX_VEB; i++)
6042                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6043                         i40e_veb_link_event(pf->veb[i], link_up);
6044
6045         /* ... now the local VSIs */
6046         for (i = 0; i < pf->num_alloc_vsi; i++)
6047                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6048                         i40e_vsi_link_event(pf->vsi[i], link_up);
6049 }
6050
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Queries firmware for the current link state; when link or speed
 * changed, prints the link message, propagates the event down the
 * switch tree, notifies the VFs and re-tunes the PTP increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);
	if (status) {
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	/* nothing changed and carrier already matches -> done.
	 * NOTE(review): netif_carrier_ok(vsi->netdev) assumes the LAN VSI
	 * always has a netdev by the time link events fire -- confirm.
	 */
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	/* link speed affects the PTP clock increment */
	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
6103
6104 /**
6105  * i40e_watchdog_subtask - periodic checks not using event driven response
6106  * @pf: board private structure
6107  **/
6108 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6109 {
6110         int i;
6111
6112         /* if interface is down do nothing */
6113         if (test_bit(__I40E_DOWN, &pf->state) ||
6114             test_bit(__I40E_CONFIG_BUSY, &pf->state))
6115                 return;
6116
6117         /* make sure we don't do these things too often */
6118         if (time_before(jiffies, (pf->service_timer_previous +
6119                                   pf->service_timer_period)))
6120                 return;
6121         pf->service_timer_previous = jiffies;
6122
6123         if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6124                 i40e_link_event(pf);
6125
6126         /* Update the stats for active netdevs so the network stack
6127          * can look at updated numbers whenever it cares to
6128          */
6129         for (i = 0; i < pf->num_alloc_vsi; i++)
6130                 if (pf->vsi[i] && pf->vsi[i]->netdev)
6131                         i40e_update_stats(pf->vsi[i]);
6132
6133         if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6134                 /* Update the stats for the active switching components */
6135                 for (i = 0; i < I40E_MAX_VEB; i++)
6136                         if (pf->veb[i])
6137                                 i40e_update_veb_stats(pf->veb[i]);
6138         }
6139
6140         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6141 }
6142
6143 /**
6144  * i40e_reset_subtask - Set up for resetting the device and driver
6145  * @pf: board private structure
6146  **/
6147 static void i40e_reset_subtask(struct i40e_pf *pf)
6148 {
6149         u32 reset_flags = 0;
6150
6151         rtnl_lock();
6152         if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6153                 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6154                 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6155         }
6156         if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6157                 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6158                 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6159         }
6160         if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6161                 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6162                 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6163         }
6164         if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6165                 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6166                 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6167         }
6168         if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6169                 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6170                 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6171         }
6172
6173         /* If there's a recovery already waiting, it takes
6174          * precedence before starting a new reset sequence.
6175          */
6176         if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6177                 i40e_handle_reset_warning(pf);
6178                 goto unlock;
6179         }
6180
6181         /* If we're already down or resetting, just bail */
6182         if (reset_flags &&
6183             !test_bit(__I40E_DOWN, &pf->state) &&
6184             !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6185                 i40e_do_reset(pf, reset_flags);
6186
6187 unlock:
6188         rtnl_unlock();
6189 }
6190
6191 /**
6192  * i40e_handle_link_event - Handle link event
6193  * @pf: board private structure
6194  * @e: event info posted on ARQ
6195  **/
6196 static void i40e_handle_link_event(struct i40e_pf *pf,
6197                                    struct i40e_arq_event_info *e)
6198 {
6199         struct i40e_aqc_get_link_status *status =
6200                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6201
6202         /* Do a new status request to re-enable LSE reporting
6203          * and load new status information into the hw struct
6204          * This completely ignores any state information
6205          * in the ARQ event info, instead choosing to always
6206          * issue the AQ update link status command.
6207          */
6208         i40e_link_event(pf);
6209
6210         /* check for unqualified module, if link is down */
6211         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6212             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6213             (!(status->link_info & I40E_AQ_LINK_UP)))
6214                 dev_err(&pf->pdev->dev,
6215                         "The driver failed to link because an unqualified module was detected.\n");
6216 }
6217
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Drain the admin receive queue: log and acknowledge any latched error
 * bits on the ARQ/ASQ length registers, process pending ARQ events
 * (bounded by pf->adminq_work_limit) by dispatching on opcode, then
 * re-enable the AdminQ interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications on the receive (ARQ) side */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* only write the register back if we cleared something */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same error checks for the send (ASQ) side */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	/* one reusable buffer for all event payloads in this pass */
	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* pull events off the ARQ and dispatch by opcode; the work limit
	 * keeps this subtask from monopolizing the CPU
	 */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* DCB reconfiguration touches netdev state, so it
			 * must run under rtnl
			 */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
6342
6343 /**
6344  * i40e_verify_eeprom - make sure eeprom is good to use
6345  * @pf: board private structure
6346  **/
6347 static void i40e_verify_eeprom(struct i40e_pf *pf)
6348 {
6349         int err;
6350
6351         err = i40e_diag_eeprom_test(&pf->hw);
6352         if (err) {
6353                 /* retry in case of garbage read */
6354                 err = i40e_diag_eeprom_test(&pf->hw);
6355                 if (err) {
6356                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6357                                  err);
6358                         set_bit(__I40E_BAD_EEPROM, &pf->state);
6359                 }
6360         }
6361
6362         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6363                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6364                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6365         }
6366 }
6367
6368 /**
6369  * i40e_enable_pf_switch_lb
6370  * @pf: pointer to the PF structure
6371  *
6372  * enable switch loop back or die - no point in a return value
6373  **/
6374 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6375 {
6376         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6377         struct i40e_vsi_context ctxt;
6378         int ret;
6379
6380         ctxt.seid = pf->main_vsi_seid;
6381         ctxt.pf_num = pf->hw.pf_id;
6382         ctxt.vf_num = 0;
6383         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6384         if (ret) {
6385                 dev_info(&pf->pdev->dev,
6386                          "couldn't get PF vsi config, err %s aq_err %s\n",
6387                          i40e_stat_str(&pf->hw, ret),
6388                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6389                 return;
6390         }
6391         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6392         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6393         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6394
6395         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6396         if (ret) {
6397                 dev_info(&pf->pdev->dev,
6398                          "update vsi switch failed, err %s aq_err %s\n",
6399                          i40e_stat_str(&pf->hw, ret),
6400                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6401         }
6402 }
6403
6404 /**
6405  * i40e_disable_pf_switch_lb
6406  * @pf: pointer to the PF structure
6407  *
6408  * disable switch loop back or die - no point in a return value
6409  **/
6410 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6411 {
6412         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6413         struct i40e_vsi_context ctxt;
6414         int ret;
6415
6416         ctxt.seid = pf->main_vsi_seid;
6417         ctxt.pf_num = pf->hw.pf_id;
6418         ctxt.vf_num = 0;
6419         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6420         if (ret) {
6421                 dev_info(&pf->pdev->dev,
6422                          "couldn't get PF vsi config, err %s aq_err %s\n",
6423                          i40e_stat_str(&pf->hw, ret),
6424                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6425                 return;
6426         }
6427         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6428         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6429         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6430
6431         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6432         if (ret) {
6433                 dev_info(&pf->pdev->dev,
6434                          "update vsi switch failed, err %s aq_err %s\n",
6435                          i40e_stat_str(&pf->hw, ret),
6436                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6437         }
6438 }
6439
6440 /**
6441  * i40e_config_bridge_mode - Configure the HW bridge mode
6442  * @veb: pointer to the bridge instance
6443  *
6444  * Configure the loop back mode for the LAN VSI that is downlink to the
6445  * specified HW bridge instance. It is expected this function is called
6446  * when a new HW bridge is instantiated.
6447  **/
6448 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6449 {
6450         struct i40e_pf *pf = veb->pf;
6451
6452         if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6453                 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6454                          veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6455         if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6456                 i40e_disable_pf_switch_lb(pf);
6457         else
6458                 i40e_enable_pf_switch_lb(pf);
6459 }
6460
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, -ENOENT if no owner VSI exists for this VEB,
 * or the error from the first rebuild step that failed.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* until the VEB exists, hang the owner VSI off the LAN VSI uplink */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* restore the bridge mode this VEB had before the reset */
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6547
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Discover the function capabilities via the admin queue; the results
 * are stored into pf->hw.func_caps by i40e_aq_discover_capabilities().
 * The request is retried with a larger buffer when firmware reports
 * ENOMEM.
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated,
 * or -ENODEV if the AQ command fails.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	/* start with room for 40 capability records */
	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer; data_size carries
			 * the size firmware asked for
			 */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

	/* on revision 0 hardware, make sure num_vsis covers at least the
	 * PF, FCoE (when present) and one VSI per VF
	 */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6608
6609 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6610
6611 /**
6612  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6613  * @pf: board private structure
6614  **/
6615 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6616 {
6617         struct i40e_vsi *vsi;
6618         int i;
6619
6620         /* quick workaround for an NVM issue that leaves a critical register
6621          * uninitialized
6622          */
6623         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6624                 static const u32 hkey[] = {
6625                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6626                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6627                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6628                         0x95b3a76d};
6629
6630                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6631                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6632         }
6633
6634         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6635                 return;
6636
6637         /* find existing VSI and see if it needs configuring */
6638         vsi = NULL;
6639         for (i = 0; i < pf->num_alloc_vsi; i++) {
6640                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6641                         vsi = pf->vsi[i];
6642                         break;
6643                 }
6644         }
6645
6646         /* create a new VSI if none exists */
6647         if (!vsi) {
6648                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6649                                      pf->vsi[pf->lan_vsi]->seid, 0);
6650                 if (!vsi) {
6651                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6652                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6653                         return;
6654                 }
6655         }
6656
6657         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6658 }
6659
6660 /**
6661  * i40e_fdir_teardown - release the Flow Director resources
6662  * @pf: board private structure
6663  **/
6664 static void i40e_fdir_teardown(struct i40e_pf *pf)
6665 {
6666         int i;
6667
6668         i40e_fdir_filter_exit(pf);
6669         for (i = 0; i < pf->num_alloc_vsi; i++) {
6670                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6671                         i40e_vsi_release(pf->vsi[i]);
6672                         break;
6673                 }
6674         }
6675 }
6676
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset: quiesce all
 * VSIs, invalidate their cached seids, and shut down the AdminQ and
 * LAN HMC.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* bail if a reset/rebuild is already in flight */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* zero the cached seids: the HW seids can change across a reset,
	 * so the rebuild path must re-learn them
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
6713
6714 /**
6715  * i40e_send_version - update firmware with driver version
6716  * @pf: PF struct
6717  */
6718 static void i40e_send_version(struct i40e_pf *pf)
6719 {
6720         struct i40e_driver_version dv;
6721
6722         dv.major_version = DRV_VERSION_MAJOR;
6723         dv.minor_version = DRV_VERSION_MINOR;
6724         dv.build_version = DRV_VERSION_BUILD;
6725         dv.subbuild_version = 0;
6726         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6727         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6728 }
6729
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Issue the PF reset, then bring the AdminQ, HMC, DCB and switch back
 * up, reconstitute the VEB/VSI tree that existed before the reset, and
 * restart traffic.  On failure at any step the function falls through
 * to clearing the reset-state bits so a later reset can be attempted.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	/* nothing to rebuild if the interface is going down anyway */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	/* older firmware (pre-4.33) needs an explicit link restart */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* kick the VFs so they re-sync with the rebuilt PF state */
	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
6932
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.  The prep step quiesces traffic
 * and shuts down the AdminQ/HMC; the rebuild step performs the actual
 * PF reset and reconstructs the switch (reinit=false: reuse the saved
 * Main VSI config).
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}
6945
6946 /**
6947  * i40e_handle_mdd_event
6948  * @pf: pointer to the PF structure
6949  *
6950  * Called from the MDD irq handler to identify possibly malicious vfs
6951  **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* nothing to do unless the MDD interrupt flagged an event */
	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* decode PF/VF/event/queue fields from the global TX MDD
		 * detection register; queue is relative to this function's
		 * base queue
		 */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		/* acknowledge/clear the TX event register */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		/* same decoding for the RX side */
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		/* check whether the offending queue belongs to the PF
		 * itself rather than a VF
		 */
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		/* a VF that keeps misbehaving is disabled until the admin
		 * re-enables it through the PF control interface
		 */
		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
7053
7054 /**
7055  * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7056  * @pf: board private structure
7057  **/
7058 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7059 {
7060 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
7061         struct i40e_hw *hw = &pf->hw;
7062         i40e_status ret;
7063         __be16 port;
7064         int i;
7065
7066         if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7067                 return;
7068
7069         pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7070
7071         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7072                 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7073                         pf->pending_udp_bitmap &= ~BIT_ULL(i);
7074                         port = pf->udp_ports[i].index;
7075                         if (port)
7076                                 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
7077                                                      pf->udp_ports[i].type,
7078                                                      NULL, NULL);
7079                         else
7080                                 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7081
7082                         if (ret) {
7083                                 dev_info(&pf->pdev->dev,
7084                                          "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
7085                                          port ? "add" : "delete",
7086                                          ntohs(port), i,
7087                                          i40e_stat_str(&pf->hw, ret),
7088                                          i40e_aq_str(&pf->hw,
7089                                                     pf->hw.aq.asq_last_status));
7090                                 pf->udp_ports[i].index = 0;
7091                         }
7092                 }
7093         }
7094 #endif
7095 }
7096
7097 /**
7098  * i40e_service_task - Run the driver's async subtasks
7099  * @work: pointer to work_struct containing our data
7100  **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	/* run the subtasks; the order matters — reset handling comes
	 * before anything that touches rings or the admin queue
	 */
	i40e_detect_recover_hung(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
	i40e_sync_udp_filters_subtask(pf);
#endif
	i40e_clean_adminq_subtask(pf);

	/* clear the service-pending state so a new run can be scheduled */
	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}
7138
7139 /**
7140  * i40e_service_timer - timer callback
7141  * @data: pointer to PF struct
7142  **/
7143 static void i40e_service_timer(unsigned long data)
7144 {
7145         struct i40e_pf *pf = (struct i40e_pf *)data;
7146
7147         mod_timer(&pf->service_timer,
7148                   round_jiffies(jiffies + pf->service_timer_period));
7149         i40e_service_event_schedule(pf);
7150 }
7151
7152 /**
7153  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7154  * @vsi: the VSI being configured
7155  **/
7156 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7157 {
7158         struct i40e_pf *pf = vsi->back;
7159
7160         switch (vsi->type) {
7161         case I40E_VSI_MAIN:
7162                 vsi->alloc_queue_pairs = pf->num_lan_qps;
7163                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7164                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
7165                 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7166                         vsi->num_q_vectors = pf->num_lan_msix;
7167                 else
7168                         vsi->num_q_vectors = 1;
7169
7170                 break;
7171
7172         case I40E_VSI_FDIR:
7173                 vsi->alloc_queue_pairs = 1;
7174                 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7175                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
7176                 vsi->num_q_vectors = 1;
7177                 break;
7178
7179         case I40E_VSI_VMDQ2:
7180                 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7181                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7182                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
7183                 vsi->num_q_vectors = pf->num_vmdq_msix;
7184                 break;
7185
7186         case I40E_VSI_SRIOV:
7187                 vsi->alloc_queue_pairs = pf->num_vf_qps;
7188                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7189                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
7190                 break;
7191
7192 #ifdef I40E_FCOE
7193         case I40E_VSI_FCOE:
7194                 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7195                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7196                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
7197                 vsi->num_q_vectors = pf->num_fcoe_msix;
7198                 break;
7199
7200 #endif /* I40E_FCOE */
7201         default:
7202                 WARN_ON(1);
7203                 return -ENODATA;
7204         }
7205
7206         return 0;
7207 }
7208
7209 /**
7210  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI being configured
7212  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7213  *
7214  * On error: returns error code (negative)
7215  * On success: returns 0
7216  **/
7217 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7218 {
7219         int size;
7220         int ret = 0;
7221
7222         /* allocate memory for both Tx and Rx ring pointers */
7223         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7224         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7225         if (!vsi->tx_rings)
7226                 return -ENOMEM;
7227         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7228
7229         if (alloc_qvectors) {
7230                 /* allocate memory for q_vector pointers */
7231                 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7232                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7233                 if (!vsi->q_vectors) {
7234                         ret = -ENOMEM;
7235                         goto err_vectors;
7236                 }
7237         }
7238         return ret;
7239
7240 err_vectors:
7241         kfree(vsi->tx_rings);
7242         return ret;
7243 }
7244
7245 /**
7246  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7247  * @pf: board private structure
7248  * @type: type of VSI
7249  *
7250  * On error: returns error code (negative)
7251  * On success: returns vsi index in PF (positive)
7252  **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrapped past the end; scan from the start up to next_vsi */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* start the next search one past the slot we just claimed */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* initialize the new VSI with defaults inherited from the PF */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	/* success: return the slot index (positive) */
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* give the claimed slot index back to the search hint */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7330
7331 /**
7332  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI whose arrays are freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 *
 * Releases the ring pointer array (and optionally the q_vector array)
 * allocated by i40e_vsi_alloc_arrays.  Returns nothing.
7338  **/
7339 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7340 {
7341         /* free the ring and vector containers */
7342         if (free_qvectors) {
7343                 kfree(vsi->q_vectors);
7344                 vsi->q_vectors = NULL;
7345         }
7346         kfree(vsi->tx_rings);
7347         vsi->tx_rings = NULL;
7348         vsi->rx_rings = NULL;
7349 }
7350
7351 /**
7352  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7353  * and lookup table
7354  * @vsi: Pointer to VSI structure
7355  */
7356 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7357 {
7358         if (!vsi)
7359                 return;
7360
7361         kfree(vsi->rss_hkey_user);
7362         vsi->rss_hkey_user = NULL;
7363
7364         kfree(vsi->rss_lut_user);
7365         vsi->rss_lut_user = NULL;
7366 }
7367
7368 /**
7369  * i40e_vsi_clear - Deallocate the VSI provided
7370  * @vsi: the VSI being un-configured
7371  **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI without a back-pointer was never registered with the PF;
	 * just free the structure itself
	 */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	/* sanity-check that the PF's slot really refers to this VSI
	 * before touching PF-level bookkeeping
	 */
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	/* release the slot and let the allocator reuse it first */
	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
7418
7419 /**
7420  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7421  * @vsi: the VSI being cleaned
7422  **/
7423 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7424 {
7425         int i;
7426
7427         if (vsi->tx_rings && vsi->tx_rings[0]) {
7428                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7429                         kfree_rcu(vsi->tx_rings[i], rcu);
7430                         vsi->tx_rings[i] = NULL;
7431                         vsi->rx_rings[i] = NULL;
7432                 }
7433         }
7434 }
7435
7436 /**
7437  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7438  * @vsi: the VSI being configured
7439  **/
7440 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7441 {
7442         struct i40e_ring *tx_ring, *rx_ring;
7443         struct i40e_pf *pf = vsi->back;
7444         int i;
7445
7446         /* Set basic values in the rings to be used later during open() */
7447         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7448                 /* allocate space for both Tx and Rx in one shot */
7449                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7450                 if (!tx_ring)
7451                         goto err_out;
7452
7453                 tx_ring->queue_index = i;
7454                 tx_ring->reg_idx = vsi->base_queue + i;
7455                 tx_ring->ring_active = false;
7456                 tx_ring->vsi = vsi;
7457                 tx_ring->netdev = vsi->netdev;
7458                 tx_ring->dev = &pf->pdev->dev;
7459                 tx_ring->count = vsi->num_desc;
7460                 tx_ring->size = 0;
7461                 tx_ring->dcb_tc = 0;
7462                 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7463                         tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7464                 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7465                         tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7466                 vsi->tx_rings[i] = tx_ring;
7467
7468                 rx_ring = &tx_ring[1];
7469                 rx_ring->queue_index = i;
7470                 rx_ring->reg_idx = vsi->base_queue + i;
7471                 rx_ring->ring_active = false;
7472                 rx_ring->vsi = vsi;
7473                 rx_ring->netdev = vsi->netdev;
7474                 rx_ring->dev = &pf->pdev->dev;
7475                 rx_ring->count = vsi->num_desc;
7476                 rx_ring->size = 0;
7477                 rx_ring->dcb_tc = 0;
7478                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7479                         set_ring_16byte_desc_enabled(rx_ring);
7480                 else
7481                         clear_ring_16byte_desc_enabled(rx_ring);
7482                 vsi->rx_rings[i] = rx_ring;
7483         }
7484
7485         return 0;
7486
7487 err_out:
7488         i40e_vsi_clear_rings(vsi);
7489         return -ENOMEM;
7490 }
7491
7492 /**
7493  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7494  * @pf: board private structure
7495  * @vectors: the number of MSI-X vectors to request
7496  *
7497  * Returns the number of vectors reserved, or error
7498  **/
7499 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7500 {
7501         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7502                                         I40E_MIN_MSIX, vectors);
7503         if (vectors < 0) {
7504                 dev_info(&pf->pdev->dev,
7505                          "MSI-X vector reservation failed: %d\n", vectors);
7506                 vectors = 0;
7507         }
7508
7509         return vectors;
7510 }
7511
7512 /**
7513  * i40e_init_msix - Setup the MSIX capability
7514  * @pf: board private structure
7515  *
7516  * Work with the OS to set up the MSIX vectors needed.
7517  *
7518  * Returns the number of vectors reserved or negative on failure
7519  **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director; if none is
	 * available the feature is turned off rather than starved
	 */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			v_budget++;
			vectors_left--;
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		/* if we're short on vectors for what's desired, we limit
		 * the queues per vmdq.  If this is still more than are
		 * available, the user will need to change the number of
		 * queues/vectors used by the PF later with the ethtool
		 * channels command
		 */
		if (vmdq_vecs < vmdq_vecs_wanted)
			pf->num_vmdq_qps = 1;
		pf->num_vmdq_msix = pf->num_vmdq_qps;

		v_budget += vmdq_vecs;
		vectors_left -= vmdq_vecs;
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (v_actual < I40E_MIN_MSIX) {
		/* couldn't even get the minimum: give up on MSI-X entirely */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		int vec;

		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			/* NOTE(review): when I40E_FCOE is defined but the
			 * FCOE flag is clear, num_lan_msix keeps its earlier
			 * value here — confirm that is the intended policy
			 */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			/* give the rest to the PF */
			pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
			break;
		}
	}

	/* features that ended up with no vectors get disabled outright */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return v_actual;
}
7695
7696 /**
7697  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7698  * @vsi: the VSI being configured
7699  * @v_idx: index of the vector in the vsi struct
7700  *
7701  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7702  **/
7703 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7704 {
7705         struct i40e_q_vector *q_vector;
7706
7707         /* allocate q_vector */
7708         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7709         if (!q_vector)
7710                 return -ENOMEM;
7711
7712         q_vector->vsi = vsi;
7713         q_vector->v_idx = v_idx;
7714         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7715         if (vsi->netdev)
7716                 netif_napi_add(vsi->netdev, &q_vector->napi,
7717                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7718
7719         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7720         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7721
7722         /* tie q_vector and vsi together */
7723         vsi->q_vectors[v_idx] = q_vector;
7724
7725         return 0;
7726 }
7727
7728 /**
7729  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7730  * @vsi: the VSI being configured
7731  *
7732  * We allocate one q_vector per queue interrupt.  If allocation fails we
7733  * return -ENOMEM.
7734  **/
7735 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7736 {
7737         struct i40e_pf *pf = vsi->back;
7738         int v_idx, num_q_vectors;
7739         int err;
7740
7741         /* if not MSIX, give the one vector only to the LAN VSI */
7742         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7743                 num_q_vectors = vsi->num_q_vectors;
7744         else if (vsi == pf->vsi[pf->lan_vsi])
7745                 num_q_vectors = 1;
7746         else
7747                 return -EINVAL;
7748
7749         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7750                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7751                 if (err)
7752                         goto err_out;
7753         }
7754
7755         return 0;
7756
7757 err_out:
7758         while (v_idx--)
7759                 i40e_free_q_vector(vsi, v_idx);
7760
7761         return err;
7762 }
7763
7764 /**
7765  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7766  * @pf: board private structure to initialize
7767  **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	/* first choice: MSI-X; on failure strip every feature that needs
	 * multiple vectors and recompute queue usage for single-vector mode
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	/* second choice: plain MSI; whether it succeeds or not we end up
	 * with exactly one vector (MSI or legacy INTx)
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7822
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success, -EFAULT if the IRQ could not be requested.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* push the register writes out before enabling ICR0 */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
7862
7863 /**
7864  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7865  * @vsi: vsi structure
7866  * @seed: RSS hash seed
7867  **/
7868 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7869                               u8 *lut, u16 lut_size)
7870 {
7871         struct i40e_aqc_get_set_rss_key_data rss_key;
7872         struct i40e_pf *pf = vsi->back;
7873         struct i40e_hw *hw = &pf->hw;
7874         bool pf_lut = false;
7875         u8 *rss_lut;
7876         int ret, i;
7877
7878         memset(&rss_key, 0, sizeof(rss_key));
7879         memcpy(&rss_key, seed, sizeof(rss_key));
7880
7881         rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7882         if (!rss_lut)
7883                 return -ENOMEM;
7884
7885         /* Populate the LUT with max no. of queues in round robin fashion */
7886         for (i = 0; i < vsi->rss_table_size; i++)
7887                 rss_lut[i] = i % vsi->rss_size;
7888
7889         ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7890         if (ret) {
7891                 dev_info(&pf->pdev->dev,
7892                          "Cannot set RSS key, err %s aq_err %s\n",
7893                          i40e_stat_str(&pf->hw, ret),
7894                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7895                 goto config_rss_aq_out;
7896         }
7897
7898         if (vsi->type == I40E_VSI_MAIN)
7899                 pf_lut = true;
7900
7901         ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7902                                   vsi->rss_table_size);
7903         if (ret)
7904                 dev_info(&pf->pdev->dev,
7905                          "Cannot set RSS lut, err %s aq_err %s\n",
7906                          i40e_stat_str(&pf->hw, ret),
7907                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7908
7909 config_rss_aq_out:
7910         kfree(rss_lut);
7911         return ret;
7912 }
7913
7914 /**
7915  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7916  * @vsi: VSI structure
7917  **/
7918 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7919 {
7920         u8 seed[I40E_HKEY_ARRAY_SIZE];
7921         struct i40e_pf *pf = vsi->back;
7922         u8 *lut;
7923         int ret;
7924
7925         if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
7926                 return 0;
7927
7928         lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
7929         if (!lut)
7930                 return -ENOMEM;
7931
7932         i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
7933         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7934         vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
7935         ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
7936         kfree(lut);
7937
7938         return ret;
7939 }
7940
7941 /**
7942  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
7943  * @vsi: Pointer to vsi structure
7944  * @seed: RSS hash seed
7945  * @lut: Lookup table
7946  * @lut_size: Lookup table size
7947  *
7948  * Returns 0 on success, negative on failure
7949  **/
7950 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
7951                                const u8 *lut, u16 lut_size)
7952 {
7953         struct i40e_pf *pf = vsi->back;
7954         struct i40e_hw *hw = &pf->hw;
7955         u8 i;
7956
7957         /* Fill out hash function seed */
7958         if (seed) {
7959                 u32 *seed_dw = (u32 *)seed;
7960
7961                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7962                         wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7963         }
7964
7965         if (lut) {
7966                 u32 *lut_dw = (u32 *)lut;
7967
7968                 if (lut_size != I40E_HLUT_ARRAY_SIZE)
7969                         return -EINVAL;
7970
7971                 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
7972                         wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
7973         }
7974         i40e_flush(hw);
7975
7976         return 0;
7977 }
7978
7979 /**
7980  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
7981  * @vsi: Pointer to VSI structure
7982  * @seed: Buffer to store the keys
7983  * @lut: Buffer to store the lookup table entries
7984  * @lut_size: Size of buffer to store the lookup table entries
7985  *
7986  * Returns 0 on success, negative on failure
7987  */
7988 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
7989                             u8 *lut, u16 lut_size)
7990 {
7991         struct i40e_pf *pf = vsi->back;
7992         struct i40e_hw *hw = &pf->hw;
7993         u16 i;
7994
7995         if (seed) {
7996                 u32 *seed_dw = (u32 *)seed;
7997
7998                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7999                         seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i));
8000         }
8001         if (lut) {
8002                 u32 *lut_dw = (u32 *)lut;
8003
8004                 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8005                         return -EINVAL;
8006                 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8007                         lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8008         }
8009
8010         return 0;
8011 }
8012
8013 /**
8014  * i40e_config_rss - Configure RSS keys and lut
8015  * @vsi: Pointer to VSI structure
8016  * @seed: RSS hash seed
8017  * @lut: Lookup table
8018  * @lut_size: Lookup table size
8019  *
8020  * Returns 0 on success, negative on failure
8021  */
8022 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8023 {
8024         struct i40e_pf *pf = vsi->back;
8025
8026         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8027                 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8028         else
8029                 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8030 }
8031
/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Unconditionally uses the register read path; no AQ-based read variant
 * exists here.
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
8045
8046 /**
8047  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8048  * @pf: Pointer to board private structure
8049  * @lut: Lookup table
8050  * @rss_table_size: Lookup table size
8051  * @rss_size: Range of queue number for hashing
8052  */
8053 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8054                               u16 rss_table_size, u16 rss_size)
8055 {
8056         u16 i;
8057
8058         for (i = 0; i < rss_table_size; i++)
8059                 lut[i] = i % rss_size;
8060 }
8061
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Enables the default hash types, sizes the hardware LUT, and programs
 * the LAN VSI's hash key and lookup table (user-supplied values take
 * precedence over generated defaults).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, otherwise the
 * result of i40e_config_rss().
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	/* HENA is a 64-bit value split across two 32-bit registers */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI, if not already set */
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
8118
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	/* never allocate more RSS queues than the hardware maximum */
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		/* changing the queue count requires a full PF reset and
		 * rebuild; the new count is picked up via req_queue_pairs
		 */
		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count/HW max RSS count:  %d/%d\n",
		 pf->alloc_rss_size, pf->rss_size_max);
	return pf->alloc_rss_size;
}
8164
8165 /**
8166  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8167  * @pf: board private structure
8168  **/
8169 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8170 {
8171         i40e_status status;
8172         bool min_valid, max_valid;
8173         u32 max_bw, min_bw;
8174
8175         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8176                                            &min_valid, &max_valid);
8177
8178         if (!status) {
8179                 if (min_valid)
8180                         pf->npar_min_bw = min_bw;
8181                 if (max_valid)
8182                         pf->npar_max_bw = max_bw;
8183         }
8184
8185         return status;
8186 }
8187
8188 /**
8189  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8190  * @pf: board private structure
8191  **/
8192 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8193 {
8194         struct i40e_aqc_configure_partition_bw_data bw_data;
8195         i40e_status status;
8196
8197         /* Set the valid bit for this PF */
8198         bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8199         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8200         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8201
8202         /* Set the new bandwidths */
8203         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8204
8205         return status;
8206 }
8207
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Persists the temporary bandwidth settings to NVM.  The sequence is:
 * acquire the NVM for read, read SW compatibility word 1 (0x10), release,
 * then re-acquire for write and write the word back unchanged, which
 * triggers an NVM update that flushes the shadow (alt) RAM values.
 * Only permitted on partition 1.
 *
 * Returns 0 on success, I40E_NOT_SUPPORTED on the wrong partition, or
 * the NVM/AQ error status of the failing step.
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
8290
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if the queue tracking pile cannot be
 * allocated.
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* set the message level; the module 'debug' parameter overrides
	 * the default when it carries the USER bit
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_LINK_POLLING_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* choose the Rx buffer scheme based on IOMMU presence */
	if (iommu_present(&pci_bus_type))
		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
	else
		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Sideband flow director is not available in multi-partition
		 * MFP mode
		 */
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* X722 parts advertise additional capabilities */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
			     I40E_FLAG_128_QP_RSS_CAPABLE |
			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
	}
	/* NOTE(review): 0xDEAD appears to be a sentinel until the real
	 * version is read from NVM — confirm against the rest of the file
	 */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
8430
8431 /**
8432  * i40e_set_ntuple - set the ntuple feature flag and take action
8433  * @pf: board private structure to initialize
8434  * @features: the feature set that the stack is suggesting
8435  *
8436  * returns a bool to indicate if reset needs to happen
8437  **/
8438 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8439 {
8440         bool need_reset = false;
8441
8442         /* Check if Flow Director n-tuple support was enabled or disabled.  If
8443          * the state changed, we need to reset.
8444          */
8445         if (features & NETIF_F_NTUPLE) {
8446                 /* Enable filters and mark for reset */
8447                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8448                         need_reset = true;
8449                 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8450         } else {
8451                 /* turn off filters, mark for reset and clear SW filter list */
8452                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8453                         need_reset = true;
8454                         i40e_fdir_filter_exit(pf);
8455                 }
8456                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8457                 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8458                 /* reset fd counters */
8459                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8460                 pf->fdir_pf_active_filters = 0;
8461                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8462                 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8463                         dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8464                 /* if ATR was auto disabled it can be re-enabled. */
8465                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8466                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8467                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8468         }
8469         return need_reset;
8470 }
8471
8472 /**
8473  * i40e_set_features - set the netdev feature flags
8474  * @netdev: ptr to the netdev being adjusted
8475  * @features: the feature set that the stack is suggesting
8476  **/
8477 static int i40e_set_features(struct net_device *netdev,
8478                              netdev_features_t features)
8479 {
8480         struct i40e_netdev_priv *np = netdev_priv(netdev);
8481         struct i40e_vsi *vsi = np->vsi;
8482         struct i40e_pf *pf = vsi->back;
8483         bool need_reset;
8484
8485         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8486                 i40e_vlan_stripping_enable(vsi);
8487         else
8488                 i40e_vlan_stripping_disable(vsi);
8489
8490         need_reset = i40e_set_ntuple(pf, features);
8491
8492         if (need_reset)
8493                 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8494
8495         return 0;
8496 }
8497
8498 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
8499 /**
8500  * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
8501  * @pf: board private structure
8502  * @port: The UDP port to look up
8503  *
8504  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8505  **/
8506 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8507 {
8508         u8 i;
8509
8510         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8511                 if (pf->udp_ports[i].index == port)
8512                         return i;
8513         }
8514
8515         return i;
8516 }
8517
8518 #endif
/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 *
 * Records the port in pf->udp_ports and marks it pending; the actual
 * filter programming happens later when the UDP_FILTER_SYNC flag is
 * serviced (not visible in this function).
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
#if IS_ENABLED(CONFIG_VXLAN)
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	/* IPv6 ports are ignored — NOTE(review): presumably unsupported
	 * by the offload; confirm against hardware capabilities
	 */
	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "vxlan port %d already offloaded\n",
			    ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port
	 * (a zero index marks a free slot)
	 */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].index = port;
	pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
#endif
}
8563
/**
 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 *
 * Marks the port's slot for deletion; the actual hardware removal is
 * deferred to the UDP_FILTER_SYNC handling (not visible here).
 **/
static void i40e_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
#if IS_ENABLED(CONFIG_VXLAN)
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	/* IPv6 ports are never tracked, so there is nothing to delete */
	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		/* if port exists, set it to 0 (mark for deletion)
		 * and make it pending
		 */
		pf->udp_ports[idx].index = 0;
		pf->pending_udp_bitmap |= BIT_ULL(idx);
		pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
	} else {
		netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
			    ntohs(port));
	}
#endif
}
8598
8599 /**
8600  * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
8601  * @netdev: This physical port's netdev
8602  * @sa_family: Socket Family that GENEVE is notifying us about
8603  * @port: New UDP port number that GENEVE started listening to
8604  **/
8605 static void i40e_add_geneve_port(struct net_device *netdev,
8606                                  sa_family_t sa_family, __be16 port)
8607 {
8608 #if IS_ENABLED(CONFIG_GENEVE)
8609         struct i40e_netdev_priv *np = netdev_priv(netdev);
8610         struct i40e_vsi *vsi = np->vsi;
8611         struct i40e_pf *pf = vsi->back;
8612         u8 next_idx;
8613         u8 idx;
8614
8615         if (sa_family == AF_INET6)
8616                 return;
8617
8618         idx = i40e_get_udp_port_idx(pf, port);
8619
8620         /* Check if port already exists */
8621         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8622                 netdev_info(netdev, "udp port %d already offloaded\n",
8623                             ntohs(port));
8624                 return;
8625         }
8626
8627         /* Now check if there is space to add the new port */
8628         next_idx = i40e_get_udp_port_idx(pf, 0);
8629
8630         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8631                 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
8632                             ntohs(port));
8633                 return;
8634         }
8635
8636         /* New port: add it and mark its index in the bitmap */
8637         pf->udp_ports[next_idx].index = port;
8638         pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8639         pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8640         pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8641
8642         dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
8643 #endif
8644 }
8645
/**
 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that GENEVE is notifying us about
 * @port: UDP port number that GENEVE stopped listening to
 **/
static void i40e_del_geneve_port(struct net_device *netdev,
				 sa_family_t sa_family, __be16 port)
{
#if IS_ENABLED(CONFIG_GENEVE)
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	/* only IPv4 UDP ports are offloaded */
	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if this port is currently in the offload table */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		/* if port exists, set it to 0 (mark for deletion)
		 * and make it pending so the sync task removes it from HW
		 */
		pf->udp_ports[idx].index = 0;
		pf->pending_udp_bitmap |= BIT_ULL(idx);
		pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

		dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
			 ntohs(port));
	} else {
		netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
			    ntohs(port));
	}
#endif
}
8683
/**
 * i40e_get_phys_port_id - Get this port's unique physical-port identifier
 * @netdev: This physical port's netdev
 * @ppid: filled with the port MAC address read from the HW
 *
 * Returns 0 on success, -EOPNOTSUPP when no valid port MAC address is
 * available (I40E_FLAG_PORT_ID_VALID not set).
 **/
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
		return -EOPNOTSUPP;

	/* clamp to whichever is smaller: the HW address or the id buffer */
	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}
8699
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID (must be 0; VLANs are not supported by this handler)
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	/* FDB entries are only accepted when SR-IOV is enabled */
	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	/* link-local unicast goes through the UC list like regular unicast */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
8746
8747 /**
8748  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8749  * @dev: the netdev being configured
8750  * @nlh: RTNL message
8751  *
8752  * Inserts a new hardware bridge if not already created and
8753  * enables the bridging mode requested (VEB or VEPA). If the
8754  * hardware bridge has already been inserted and the request
8755  * is to change the mode then that requires a PF reset to
8756  * allow rebuild of the components with required hardware
8757  * bridge mode enabled.
8758  **/
8759 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8760                                    struct nlmsghdr *nlh,
8761                                    u16 flags)
8762 {
8763         struct i40e_netdev_priv *np = netdev_priv(dev);
8764         struct i40e_vsi *vsi = np->vsi;
8765         struct i40e_pf *pf = vsi->back;
8766         struct i40e_veb *veb = NULL;
8767         struct nlattr *attr, *br_spec;
8768         int i, rem;
8769
8770         /* Only for PF VSI for now */
8771         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8772                 return -EOPNOTSUPP;
8773
8774         /* Find the HW bridge for PF VSI */
8775         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8776                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8777                         veb = pf->veb[i];
8778         }
8779
8780         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8781
8782         nla_for_each_nested(attr, br_spec, rem) {
8783                 __u16 mode;
8784
8785                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8786                         continue;
8787
8788                 mode = nla_get_u16(attr);
8789                 if ((mode != BRIDGE_MODE_VEPA) &&
8790                     (mode != BRIDGE_MODE_VEB))
8791                         return -EINVAL;
8792
8793                 /* Insert a new HW bridge */
8794                 if (!veb) {
8795                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8796                                              vsi->tc_config.enabled_tc);
8797                         if (veb) {
8798                                 veb->bridge_mode = mode;
8799                                 i40e_config_bridge_mode(veb);
8800                         } else {
8801                                 /* No Bridge HW offload available */
8802                                 return -ENOENT;
8803                         }
8804                         break;
8805                 } else if (mode != veb->bridge_mode) {
8806                         /* Existing HW bridge but different mode needs reset */
8807                         veb->bridge_mode = mode;
8808                         /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8809                         if (mode == BRIDGE_MODE_VEB)
8810                                 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8811                         else
8812                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8813                         i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8814                         break;
8815                 }
8816         }
8817
8818         return 0;
8819 }
8820
8821 /**
8822  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8823  * @skb: skb buff
8824  * @pid: process id
8825  * @seq: RTNL message seq #
8826  * @dev: the netdev being configured
8827  * @filter_mask: unused
8828  * @nlflags: netlink flags passed in
8829  *
8830  * Return the mode in which the hardware bridge is operating in
8831  * i.e VEB or VEPA.
8832  **/
8833 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8834                                    struct net_device *dev,
8835                                    u32 __always_unused filter_mask,
8836                                    int nlflags)
8837 {
8838         struct i40e_netdev_priv *np = netdev_priv(dev);
8839         struct i40e_vsi *vsi = np->vsi;
8840         struct i40e_pf *pf = vsi->back;
8841         struct i40e_veb *veb = NULL;
8842         int i;
8843
8844         /* Only for PF VSI for now */
8845         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8846                 return -EOPNOTSUPP;
8847
8848         /* Find the HW bridge for the PF VSI */
8849         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8850                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8851                         veb = pf->veb[i];
8852         }
8853
8854         if (!veb)
8855                 return 0;
8856
8857         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8858                                        nlflags, 0, 0, filter_mask, NULL);
8859 }
8860
8861 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
8862  * inner mac plus all inner ethertypes.
8863  */
8864 #define I40E_MAX_TUNNEL_HDR_LEN 128
8865 /**
8866  * i40e_features_check - Validate encapsulated packet conforms to limits
8867  * @skb: skb buff
8868  * @dev: This physical port's netdev
8869  * @features: Offload features that the stack believes apply
8870  **/
8871 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8872                                              struct net_device *dev,
8873                                              netdev_features_t features)
8874 {
8875         if (skb->encapsulation &&
8876             ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
8877              I40E_MAX_TUNNEL_HDR_LEN))
8878                 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8879
8880         return features;
8881 }
8882
/* netdev entry points for the i40e netdevs; installed on each netdev by
 * i40e_config_netdev()
 */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#if IS_ENABLED(CONFIG_VXLAN)
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
#if IS_ENABLED(CONFIG_GENEVE)
	.ndo_add_geneve_port	= i40e_add_geneve_port,
	.ndo_del_geneve_port	= i40e_del_geneve_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
};
8925
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the netdev for @vsi, wires up features, MAC filters, the
 * netdev ops and ethtool ops.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	/* link the netdev and the VSI to each other */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* offloads supported on encapsulated (tunneled) traffic */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
				  NETIF_F_RXCSUM	 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_GSO_GRE	 |
				  NETIF_F_TSO;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CRC	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_GSO_GRE	       |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	/* ntuple filters are only offered outside of MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
		}
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* every VSI accepts broadcast frames */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}
9031
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Removes the VSI element from the device switch via an admin queue
 * command.  Deleting the PF's default LAN VSI is silently refused.
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
9046
9047 /**
9048  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9049  * @vsi: the VSI being queried
9050  *
9051  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
9052  **/
9053 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9054 {
9055         struct i40e_veb *veb;
9056         struct i40e_pf *pf = vsi->back;
9057
9058         /* Uplink is not a bridge so default to VEB */
9059         if (vsi->veb_idx == I40E_NO_VEB)
9060                 return 1;
9061
9062         veb = pf->veb[vsi->veb_idx];
9063         if (!veb) {
9064                 dev_info(&pf->pdev->dev,
9065                          "There is no veb associated with the bridge\n");
9066                 return -ENOENT;
9067         }
9068
9069         /* Uplink is a bridge in VEPA mode */
9070         if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9071                 return 0;
9072         } else {
9073                 /* Uplink is a bridge in VEB mode */
9074                 return 1;
9075         }
9076
9077         /* VEPA is now default bridge, so return 0 */
9078         return 0;
9079 }
9080
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	u8 laa_macaddr[ETH_ALEN];
	bool found_laa_mac_filter = false;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f, *ftmp;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	/* build the VSI context for the admin queue command */
	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		/* Flow Director VSI: owned by the PF, single TC queue map */
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		/* allow loopback when the uplink bridge is in VEB mode */
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		/* honor the per-VF spoof-check setting */
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	/* the main VSI already exists in HW; every other type is added now */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		/* Expected to have only one MAC filter entry for LAA in list */
		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			ether_addr_copy(laa_macaddr, f->macaddr);
			found_laa_mac_filter = true;
		}
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	if (found_laa_mac_filter) {
		struct i40e_aqc_remove_macvlan_element_data element;

		/* remove the HW default filter before writing the LAA */
		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, laa_macaddr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		ret = i40e_aq_remove_macvlan(hw, vsi->seid,
					     &element, 1, NULL);
		if (ret) {
			/* some older FW has a different default */
			element.flags |=
				       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			i40e_aq_remove_macvlan(hw, vsi->seid,
					       &element, 1, NULL);
		}

		i40e_aq_mac_address_write(hw,
					  I40E_AQC_WRITE_TYPE_LAA_WOL,
					  laa_macaddr, NULL);
	}

	/* schedule a filter sync if any filters were (re)marked */
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
9323
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Tears down the VSI in a strict order: netdev/IRQ quiesce, MAC filter
 * removal and HW sync, FW element deletion, then SW resource release.
 * Finally, if the VSI's uplink VEB is left empty (owner VSI only), the
 * VEB is released as well.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	/* the PF's own LAN VSI may only disappear during a full teardown */
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* cache the uplink seid now; the VSI struct is freed before the
	 * empty-VEB check at the bottom needs it
	 */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* drop every MAC/VLAN filter, then push the now-empty list to HW */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	i40e_sync_vsi_filters(vsi);

	/* remove the FW element, then release all SW-side resources */
	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	/* a floating VEB (uplink_seid == 0) is an orphan; leave it alone */
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
9411
9412 /**
9413  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9414  * @vsi: ptr to the VSI
9415  *
9416  * This should only be called after i40e_vsi_mem_alloc() which allocates the
9417  * corresponding SW VSI structure and initializes num_queue_pairs for the
9418  * newly allocated VSI.
9419  *
9420  * Returns 0 on success or negative on failure
9421  **/
9422 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9423 {
9424         int ret = -ENOENT;
9425         struct i40e_pf *pf = vsi->back;
9426
9427         if (vsi->q_vectors[0]) {
9428                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9429                          vsi->seid);
9430                 return -EEXIST;
9431         }
9432
9433         if (vsi->base_vector) {
9434                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9435                          vsi->seid, vsi->base_vector);
9436                 return -EEXIST;
9437         }
9438
9439         ret = i40e_vsi_alloc_q_vectors(vsi);
9440         if (ret) {
9441                 dev_info(&pf->pdev->dev,
9442                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9443                          vsi->num_q_vectors, vsi->seid, ret);
9444                 vsi->num_q_vectors = 0;
9445                 goto vector_setup_out;
9446         }
9447
9448         /* In Legacy mode, we do not have to get any other vector since we
9449          * piggyback on the misc/ICR0 for queue interrupts.
9450         */
9451         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9452                 return ret;
9453         if (vsi->num_q_vectors)
9454                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9455                                                  vsi->num_q_vectors, vsi->idx);
9456         if (vsi->base_vector < 0) {
9457                 dev_info(&pf->pdev->dev,
9458                          "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9459                          vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9460                 i40e_vsi_free_q_vectors(vsi);
9461                 ret = -ENOENT;
9462                 goto vector_setup_out;
9463         }
9464
9465 vector_setup_out:
9466         return ret;
9467 }
9468
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	/* return the VSI's queue range to the pool and drop the old rings */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	/* resize the per-VSI bookkeeping arrays for the new ring counts;
	 * NOTE(review): the bool argument presumably selects whether the
	 * q_vector arrays are touched as well -- confirm against
	 * i40e_vsi_free_arrays()/i40e_vsi_alloc_arrays()
	 */
	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	/* grab a fresh contiguous queue range for this VSI */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	/* unwind in reverse: vectors, netdev registration, FW element */
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
9532
9533 /**
9534  * i40e_macaddr_init - explicitly write the mac address filters.
9535  *
9536  * @vsi: pointer to the vsi.
9537  * @macaddr: the MAC address
9538  *
9539  * This is needed when the macaddr has been obtained by other
9540  * means than the default, e.g., from Open Firmware or IDPROM.
9541  * Returns 0 on success, negative on failure
9542  **/
9543 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
9544 {
9545         int ret;
9546         struct i40e_aqc_add_macvlan_element_data element;
9547
9548         ret = i40e_aq_mac_address_write(&vsi->back->hw,
9549                                         I40E_AQC_WRITE_TYPE_LAA_WOL,
9550                                         macaddr, NULL);
9551         if (ret) {
9552                 dev_info(&vsi->back->pdev->dev,
9553                          "Addr change for VSI failed: %d\n", ret);
9554                 return -EADDRNOTAVAIL;
9555         }
9556
9557         memset(&element, 0, sizeof(element));
9558         ether_addr_copy(element.mac_addr, macaddr);
9559         element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
9560         ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
9561         if (ret) {
9562                 dev_info(&vsi->back->pdev->dev,
9563                          "add filter failed err %s aq_err %s\n",
9564                          i40e_stat_str(&vsi->back->hw, ret),
9565                          i40e_aq_str(&vsi->back->hw,
9566                                      vsi->back->hw.aq.asq_last_status));
9567         }
9568         return ret;
9569 }
9570
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		/* uplink is a VSI seid; locate that VSI */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		/* create a VEB on top of the uplink VSI unless it already
		 * owns one
		 */
		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		/* the uplink VSI may already have owned a VEB; find it */
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was selected.
		 */
		if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
			if (ret) {
				dev_warn(&pf->pdev->dev,
					 "could not set up macaddr; err %d\n",
					 ret);
			}
		}
		/* fall through - MAIN also gets a netdev */
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
9765
9766 /**
9767  * i40e_veb_get_bw_info - Query VEB BW information
9768  * @veb: the veb to query
9769  *
9770  * Query the Tx scheduler BW configuration data for given VEB
9771  **/
9772 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9773 {
9774         struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9775         struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9776         struct i40e_pf *pf = veb->pf;
9777         struct i40e_hw *hw = &pf->hw;
9778         u32 tc_bw_max;
9779         int ret = 0;
9780         int i;
9781
9782         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9783                                                   &bw_data, NULL);
9784         if (ret) {
9785                 dev_info(&pf->pdev->dev,
9786                          "query veb bw config failed, err %s aq_err %s\n",
9787                          i40e_stat_str(&pf->hw, ret),
9788                          i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9789                 goto out;
9790         }
9791
9792         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9793                                                    &ets_data, NULL);
9794         if (ret) {
9795                 dev_info(&pf->pdev->dev,
9796                          "query veb bw ets config failed, err %s aq_err %s\n",
9797                          i40e_stat_str(&pf->hw, ret),
9798                          i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9799                 goto out;
9800         }
9801
9802         veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9803         veb->bw_max_quanta = ets_data.tc_bw_max;
9804         veb->is_abs_credits = bw_data.absolute_credits_enable;
9805         veb->enabled_tc = ets_data.tc_valid_bits;
9806         tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9807                     (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9808         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9809                 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9810                 veb->bw_tc_limit_credits[i] =
9811                                         le16_to_cpu(bw_data.tc_bw_limits[i]);
9812                 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9813         }
9814
9815 out:
9816         return ret;
9817 }
9818
9819 /**
9820  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9821  * @pf: board private structure
9822  *
9823  * On error: returns error code (negative)
9824  * On success: returns vsi index in PF (positive)
9825  **/
9826 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9827 {
9828         int ret = -ENOENT;
9829         struct i40e_veb *veb;
9830         int i;
9831
9832         /* Need to protect the allocation of switch elements at the PF level */
9833         mutex_lock(&pf->switch_mutex);
9834
9835         /* VEB list may be fragmented if VEB creation/destruction has
9836          * been happening.  We can afford to do a quick scan to look
9837          * for any free slots in the list.
9838          *
9839          * find next empty veb slot, looping back around if necessary
9840          */
9841         i = 0;
9842         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9843                 i++;
9844         if (i >= I40E_MAX_VEB) {
9845                 ret = -ENOMEM;
9846                 goto err_alloc_veb;  /* out of VEB slots! */
9847         }
9848
9849         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9850         if (!veb) {
9851                 ret = -ENOMEM;
9852                 goto err_alloc_veb;
9853         }
9854         veb->pf = pf;
9855         veb->idx = i;
9856         veb->enabled_tc = 1;
9857
9858         pf->veb[i] = veb;
9859         ret = i;
9860 err_alloc_veb:
9861         mutex_unlock(&pf->switch_mutex);
9862         return ret;
9863 }
9864
9865 /**
9866  * i40e_switch_branch_release - Delete a branch of the switch tree
9867  * @branch: where to start deleting
9868  *
9869  * This uses recursion to find the tips of the branch to be
9870  * removed, deleting until we get back to and can delete this VEB.
9871  **/
9872 static void i40e_switch_branch_release(struct i40e_veb *branch)
9873 {
9874         struct i40e_pf *pf = branch->pf;
9875         u16 branch_seid = branch->seid;
9876         u16 veb_idx = branch->idx;
9877         int i;
9878
9879         /* release any VEBs on this VEB - RECURSION */
9880         for (i = 0; i < I40E_MAX_VEB; i++) {
9881                 if (!pf->veb[i])
9882                         continue;
9883                 if (pf->veb[i]->uplink_seid == branch->seid)
9884                         i40e_switch_branch_release(pf->veb[i]);
9885         }
9886
9887         /* Release the VSIs on this VEB, but not the owner VSI.
9888          *
9889          * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9890          *       the VEB itself, so don't use (*branch) after this loop.
9891          */
9892         for (i = 0; i < pf->num_alloc_vsi; i++) {
9893                 if (!pf->vsi[i])
9894                         continue;
9895                 if (pf->vsi[i]->uplink_seid == branch_seid &&
9896                    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9897                         i40e_vsi_release(pf->vsi[i]);
9898                 }
9899         }
9900
9901         /* There's one corner case where the VEB might not have been
9902          * removed, so double check it here and remove it if needed.
9903          * This case happens if the veb was created from the debugfs
9904          * commands and no VSIs were added to it.
9905          */
9906         if (pf->veb[veb_idx])
9907                 i40e_veb_release(pf->veb[veb_idx]);
9908 }
9909
9910 /**
9911  * i40e_veb_clear - remove veb struct
9912  * @veb: the veb to remove
9913  **/
9914 static void i40e_veb_clear(struct i40e_veb *veb)
9915 {
9916         if (!veb)
9917                 return;
9918
9919         if (veb->pf) {
9920                 struct i40e_pf *pf = veb->pf;
9921
9922                 mutex_lock(&pf->switch_mutex);
9923                 if (pf->veb[veb->idx] == veb)
9924                         pf->veb[veb->idx] = NULL;
9925                 mutex_unlock(&pf->switch_mutex);
9926         }
9927
9928         kfree(veb);
9929 }
9930
9931 /**
9932  * i40e_veb_release - Delete a VEB and free its resources
9933  * @veb: the VEB being removed
9934  **/
9935 void i40e_veb_release(struct i40e_veb *veb)
9936 {
9937         struct i40e_vsi *vsi = NULL;
9938         struct i40e_pf *pf;
9939         int i, n = 0;
9940
9941         pf = veb->pf;
9942
9943         /* find the remaining VSI and check for extras */
9944         for (i = 0; i < pf->num_alloc_vsi; i++) {
9945                 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9946                         n++;
9947                         vsi = pf->vsi[i];
9948                 }
9949         }
9950         if (n != 1) {
9951                 dev_info(&pf->pdev->dev,
9952                          "can't remove VEB %d with %d VSIs left\n",
9953                          veb->seid, n);
9954                 return;
9955         }
9956
9957         /* move the remaining VSI to uplink veb */
9958         vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9959         if (veb->uplink_seid) {
9960                 vsi->uplink_seid = veb->uplink_seid;
9961                 if (veb->uplink_seid == pf->mac_seid)
9962                         vsi->veb_idx = I40E_NO_VEB;
9963                 else
9964                         vsi->veb_idx = veb->veb_idx;
9965         } else {
9966                 /* floating VEB */
9967                 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9968                 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9969         }
9970
9971         i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9972         i40e_veb_clear(veb);
9973 }
9974
9975 /**
9976  * i40e_add_veb - create the VEB in the switch
9977  * @veb: the VEB to be instantiated
9978  * @vsi: the controlling VSI
9979  **/
9980 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9981 {
9982         struct i40e_pf *pf = veb->pf;
9983         bool is_default = veb->pf->cur_promisc;
9984         bool is_cloud = false;
9985         int ret;
9986
9987         /* get a VEB from the hardware */
9988         ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
9989                               veb->enabled_tc, is_default,
9990                               is_cloud, &veb->seid, NULL);
9991         if (ret) {
9992                 dev_info(&pf->pdev->dev,
9993                          "couldn't add VEB, err %s aq_err %s\n",
9994                          i40e_stat_str(&pf->hw, ret),
9995                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9996                 return -EPERM;
9997         }
9998
9999         /* get statistics counter */
10000         ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10001                                          &veb->stats_idx, NULL, NULL, NULL);
10002         if (ret) {
10003                 dev_info(&pf->pdev->dev,
10004                          "couldn't get VEB statistics idx, err %s aq_err %s\n",
10005                          i40e_stat_str(&pf->hw, ret),
10006                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10007                 return -EPERM;
10008         }
10009         ret = i40e_veb_get_bw_info(veb);
10010         if (ret) {
10011                 dev_info(&pf->pdev->dev,
10012                          "couldn't get VEB bw info, err %s aq_err %s\n",
10013                          i40e_stat_str(&pf->hw, ret),
10014                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10015                 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10016                 return -ENOENT;
10017         }
10018
10019         vsi->uplink_seid = veb->seid;
10020         vsi->veb_idx = veb->idx;
10021         vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10022
10023         return 0;
10024 }
10025
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}
	/* NOTE(review): for a floating VEB (vsi_seid == 0) the loop above
	 * leaves vsi_idx == pf->num_alloc_vsi, so pf->vsi[vsi_idx] passed
	 * to i40e_add_veb() below indexes one past the VSI array -- confirm
	 * whether the floating-VEB path is ever exercised with this code.
	 */

	/* resolve the uplink VEB unless we hang directly off the MAC port */
	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	/* a VEB over the LAN VSI becomes the PF's LAN VEB */
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
10108
10109 /**
10110  * i40e_setup_pf_switch_element - set PF vars based on switch type
10111  * @pf: board private structure
10112  * @ele: element we are building info from
10113  * @num_reported: total number of elements
10114  * @printconfig: should we print the contents
10115  *
10116  * helper function to assist in extracting a few useful SEID values.
10117  **/
10118 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10119                                 struct i40e_aqc_switch_config_element_resp *ele,
10120                                 u16 num_reported, bool printconfig)
10121 {
10122         u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10123         u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10124         u8 element_type = ele->element_type;
10125         u16 seid = le16_to_cpu(ele->seid);
10126
10127         if (printconfig)
10128                 dev_info(&pf->pdev->dev,
10129                          "type=%d seid=%d uplink=%d downlink=%d\n",
10130                          element_type, seid, uplink_seid, downlink_seid);
10131
10132         switch (element_type) {
10133         case I40E_SWITCH_ELEMENT_TYPE_MAC:
10134                 pf->mac_seid = seid;
10135                 break;
10136         case I40E_SWITCH_ELEMENT_TYPE_VEB:
10137                 /* Main VEB? */
10138                 if (uplink_seid != pf->mac_seid)
10139                         break;
10140                 if (pf->lan_veb == I40E_NO_VEB) {
10141                         int v;
10142
10143                         /* find existing or else empty VEB */
10144                         for (v = 0; v < I40E_MAX_VEB; v++) {
10145                                 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10146                                         pf->lan_veb = v;
10147                                         break;
10148                                 }
10149                         }
10150                         if (pf->lan_veb == I40E_NO_VEB) {
10151                                 v = i40e_veb_mem_alloc(pf);
10152                                 if (v < 0)
10153                                         break;
10154                                 pf->lan_veb = v;
10155                         }
10156                 }
10157
10158                 pf->veb[pf->lan_veb]->seid = seid;
10159                 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10160                 pf->veb[pf->lan_veb]->pf = pf;
10161                 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10162                 break;
10163         case I40E_SWITCH_ELEMENT_TYPE_VSI:
10164                 if (num_reported != 1)
10165                         break;
10166                 /* This is immediately after a reset so we can assume this is
10167                  * the PF's VSI
10168                  */
10169                 pf->mac_seid = uplink_seid;
10170                 pf->pf_seid = downlink_seid;
10171                 pf->main_vsi_seid = seid;
10172                 if (printconfig)
10173                         dev_info(&pf->pdev->dev,
10174                                  "pf_seid=%d main_vsi_seid=%d\n",
10175                                  pf->pf_seid, pf->main_vsi_seid);
10176                 break;
10177         case I40E_SWITCH_ELEMENT_TYPE_PF:
10178         case I40E_SWITCH_ELEMENT_TYPE_VF:
10179         case I40E_SWITCH_ELEMENT_TYPE_EMP:
10180         case I40E_SWITCH_ELEMENT_TYPE_BMC:
10181         case I40E_SWITCH_ELEMENT_TYPE_PE:
10182         case I40E_SWITCH_ELEMENT_TYPE_PA:
10183                 /* ignore these for now */
10184                 break;
10185         default:
10186                 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10187                          element_type, seid);
10188                 break;
10189         }
10190 }
10191
10192 /**
10193  * i40e_fetch_switch_configuration - Get switch config from firmware
10194  * @pf: board private structure
10195  * @printconfig: should we print the contents
10196  *
10197  * Get the current switch configuration from the device and
10198  * extract a few useful SEID values.
10199  **/
10200 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10201 {
10202         struct i40e_aqc_get_switch_config_resp *sw_config;
10203         u16 next_seid = 0;
10204         int ret = 0;
10205         u8 *aq_buf;
10206         int i;
10207
10208         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10209         if (!aq_buf)
10210                 return -ENOMEM;
10211
10212         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10213         do {
10214                 u16 num_reported, num_total;
10215
10216                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10217                                                 I40E_AQ_LARGE_BUF,
10218                                                 &next_seid, NULL);
10219                 if (ret) {
10220                         dev_info(&pf->pdev->dev,
10221                                  "get switch config failed err %s aq_err %s\n",
10222                                  i40e_stat_str(&pf->hw, ret),
10223                                  i40e_aq_str(&pf->hw,
10224                                              pf->hw.aq.asq_last_status));
10225                         kfree(aq_buf);
10226                         return -ENOENT;
10227                 }
10228
10229                 num_reported = le16_to_cpu(sw_config->header.num_reported);
10230                 num_total = le16_to_cpu(sw_config->header.num_total);
10231
10232                 if (printconfig)
10233                         dev_info(&pf->pdev->dev,
10234                                  "header: %d reported %d total\n",
10235                                  num_reported, num_total);
10236
10237                 for (i = 0; i < num_reported; i++) {
10238                         struct i40e_aqc_switch_config_element_resp *ele =
10239                                 &sw_config->element[i];
10240
10241                         i40e_setup_pf_switch_element(pf, ele, num_reported,
10242                                                      printconfig);
10243                 }
10244         } while (next_seid != 0);
10245
10246         kfree(aq_buf);
10247         return ret;
10248 }
10249
10250 /**
10251  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10252  * @pf: board private structure
10253  * @reinit: if the Main VSI needs to re-initialized.
10254  *
10255  * Returns 0 on success, negative value on failure
10256  **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
        int ret;

        /* find out what's out there already */
        ret = i40e_fetch_switch_configuration(pf, false);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't fetch switch config, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return ret;
        }
        /* start all port/VEB/VSI statistics from a clean baseline */
        i40e_pf_reset_stats(pf);

        /* first time setup */
        if (pf->lan_vsi == I40E_NO_VSI || reinit) {
                struct i40e_vsi *vsi = NULL;
                u16 uplink_seid;

                /* Set up the PF VSI associated with the PF's main VSI
                 * that is already in the HW switch
                 */
                if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
                        uplink_seid = pf->veb[pf->lan_veb]->seid;
                else
                        uplink_seid = pf->mac_seid;
                /* no LAN VSI yet means first init; otherwise on reinit we
                 * rebuild the existing one in place
                 */
                if (pf->lan_vsi == I40E_NO_VSI)
                        vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
                else if (reinit)
                        vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
                        i40e_fdir_teardown(pf);
                        return -EAGAIN;
                }
        } else {
                /* force a reset of TC and queue layout configurations */
                u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

                /* zero enabled_tc first so config_tc sees a real change */
                pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
                pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
                i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
        }
        i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

        /* bring up the Flow Director sideband VSI if enabled */
        i40e_fdir_sb_setup(pf);

        /* Setup static PF queue filter control settings */
        ret = i40e_setup_pf_filter_control(pf);
        if (ret) {
                dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
                         ret);
                /* Failure here should not stop continuing other steps */
        }

        /* enable RSS in the HW, even for only one queue, as the stack can use
         * the hash
         */
        if ((pf->flags & I40E_FLAG_RSS_ENABLED))
                i40e_pf_config_rss(pf);

        /* fill in link information and enable LSE reporting */
        i40e_update_link_info(&pf->hw);
        i40e_link_event(pf);

        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);

        i40e_ptp_init(pf);

        /* ret carries the (non-fatal) filter-control result at this point */
        return ret;
}
10331
10332 /**
10333  * i40e_determine_queue_usage - Work out queue distribution
10334  * @pf: board private structure
10335  **/
10336 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10337 {
10338         int queues_left;
10339
10340         pf->num_lan_qps = 0;
10341 #ifdef I40E_FCOE
10342         pf->num_fcoe_qps = 0;
10343 #endif
10344
10345         /* Find the max queues to be put into basic use.  We'll always be
10346          * using TC0, whether or not DCB is running, and TC0 will get the
10347          * big RSS set.
10348          */
10349         queues_left = pf->hw.func_caps.num_tx_qp;
10350
10351         if ((queues_left == 1) ||
10352             !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10353                 /* one qp for PF, no queues for anything else */
10354                 queues_left = 0;
10355                 pf->alloc_rss_size = pf->num_lan_qps = 1;
10356
10357                 /* make sure all the fancies are disabled */
10358                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
10359 #ifdef I40E_FCOE
10360                                I40E_FLAG_FCOE_ENABLED   |
10361 #endif
10362                                I40E_FLAG_FD_SB_ENABLED  |
10363                                I40E_FLAG_FD_ATR_ENABLED |
10364                                I40E_FLAG_DCB_CAPABLE    |
10365                                I40E_FLAG_SRIOV_ENABLED  |
10366                                I40E_FLAG_VMDQ_ENABLED);
10367         } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10368                                   I40E_FLAG_FD_SB_ENABLED |
10369                                   I40E_FLAG_FD_ATR_ENABLED |
10370                                   I40E_FLAG_DCB_CAPABLE))) {
10371                 /* one qp for PF */
10372                 pf->alloc_rss_size = pf->num_lan_qps = 1;
10373                 queues_left -= pf->num_lan_qps;
10374
10375                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
10376 #ifdef I40E_FCOE
10377                                I40E_FLAG_FCOE_ENABLED   |
10378 #endif
10379                                I40E_FLAG_FD_SB_ENABLED  |
10380                                I40E_FLAG_FD_ATR_ENABLED |
10381                                I40E_FLAG_DCB_ENABLED    |
10382                                I40E_FLAG_VMDQ_ENABLED);
10383         } else {
10384                 /* Not enough queues for all TCs */
10385                 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10386                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10387                         pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10388                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10389                 }
10390                 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10391                                         num_online_cpus());
10392                 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10393                                         pf->hw.func_caps.num_tx_qp);
10394
10395                 queues_left -= pf->num_lan_qps;
10396         }
10397
10398 #ifdef I40E_FCOE
10399         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10400                 if (I40E_DEFAULT_FCOE <= queues_left) {
10401                         pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10402                 } else if (I40E_MINIMUM_FCOE <= queues_left) {
10403                         pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10404                 } else {
10405                         pf->num_fcoe_qps = 0;
10406                         pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10407                         dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10408                 }
10409
10410                 queues_left -= pf->num_fcoe_qps;
10411         }
10412
10413 #endif
10414         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10415                 if (queues_left > 1) {
10416                         queues_left -= 1; /* save 1 queue for FD */
10417                 } else {
10418                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10419                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10420                 }
10421         }
10422
10423         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10424             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10425                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10426                                         (queues_left / pf->num_vf_qps));
10427                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10428         }
10429
10430         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10431             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10432                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10433                                           (queues_left / pf->num_vmdq_qps));
10434                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10435         }
10436
10437         pf->queues_left = queues_left;
10438         dev_dbg(&pf->pdev->dev,
10439                 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10440                 pf->hw.func_caps.num_tx_qp,
10441                 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10442                 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10443                 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10444                 queues_left);
10445 #ifdef I40E_FCOE
10446         dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10447 #endif
10448 }
10449
10450 /**
10451  * i40e_setup_pf_filter_control - Setup PF static filter control
10452  * @pf: PF to be setup
10453  *
10454  * i40e_setup_pf_filter_control sets up a PF's initial filter control
10455  * settings. If PE/FCoE are enabled then it will also set the per PF
10456  * based filter sizes required for them. It also enables Flow director,
10457  * ethertype and macvlan type filter settings for the pf.
10458  *
10459  * Returns 0 on success, negative on failure
10460  **/
10461 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10462 {
10463         struct i40e_filter_control_settings *settings = &pf->filter_settings;
10464
10465         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10466
10467         /* Flow Director is enabled */
10468         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10469                 settings->enable_fdir = true;
10470
10471         /* Ethtype and MACVLAN filters enabled for PF */
10472         settings->enable_ethtype = true;
10473         settings->enable_macvlan = true;
10474
10475         if (i40e_set_filter_control(&pf->hw, settings))
10476                 return -ENOENT;
10477
10478         return 0;
10479 }
10480
10481 #define INFO_STRING_LEN 255
10482 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
10483 static void i40e_print_features(struct i40e_pf *pf)
10484 {
10485         struct i40e_hw *hw = &pf->hw;
10486         char *buf;
10487         int i;
10488
10489         buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10490         if (!buf)
10491                 return;
10492
10493         i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10494 #ifdef CONFIG_PCI_IOV
10495         i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10496 #endif
10497         i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
10498                       pf->hw.func_caps.num_vsis,
10499                       pf->vsi[pf->lan_vsi]->num_queue_pairs,
10500                       pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
10501
10502         if (pf->flags & I40E_FLAG_RSS_ENABLED)
10503                 i += snprintf(&buf[i], REMAIN(i), " RSS");
10504         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10505                 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10506         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10507                 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10508                 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10509         }
10510         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10511                 i += snprintf(&buf[i], REMAIN(i), " DCB");
10512 #if IS_ENABLED(CONFIG_VXLAN)
10513         i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10514 #endif
10515 #if IS_ENABLED(CONFIG_GENEVE)
10516         i += snprintf(&buf[i], REMAIN(i), " Geneve");
10517 #endif
10518         if (pf->flags & I40E_FLAG_PTP)
10519                 i += snprintf(&buf[i], REMAIN(i), " PTP");
10520 #ifdef I40E_FCOE
10521         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10522                 i += snprintf(&buf[i], REMAIN(i), " FCOE");
10523 #endif
10524         if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10525                 i += snprintf(&buf[i], REMAIN(i), " VEB");
10526         else
10527                 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10528
10529         dev_info(&pf->pdev->dev, "%s\n", buf);
10530         kfree(buf);
10531         WARN_ON(i > INFO_STRING_LEN);
10532 }
10533
10534 /**
10535  * i40e_get_platform_mac_addr - get platform-specific MAC address
10536  *
10537  * @pdev: PCI device information struct
10538  * @pf: board private structure
10539  *
10540  * Look up the MAC address in Open Firmware on systems that support it,
10541  * and use IDPROM on SPARC if no OF address is found. On return, the
10542  * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10543  * has been selected.
10544  **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        u8 *mac_addr = pf->hw.mac.addr;

        /* assume no platform-supplied MAC until one is found below */
        pf->flags &= ~I40E_FLAG_PF_MAC;
        /* Open Firmware / Device Tree MAC property for this PCI device */
        addr = of_get_mac_address(dp);
        if (addr) {
                ether_addr_copy(mac_addr, addr);
                pf->flags |= I40E_FLAG_PF_MAC;
#ifdef CONFIG_SPARC
        } else {
                /* no OF property: fall back to the SPARC IDPROM address */
                ether_addr_copy(mac_addr, idprom->id_ethaddr);
                pf->flags |= I40E_FLAG_PF_MAC;
#endif /* CONFIG_SPARC */
        }
}
10563
10564 /**
10565  * i40e_probe - Device initialization routine
10566  * @pdev: PCI device information struct
10567  * @ent: entry in i40e_pci_tbl
10568  *
10569  * i40e_probe initializes a PF identified by a pci_dev structure.
10570  * The OS initialization, configuring of the PF private structure,
10571  * and a hardware reset occur.
10572  *
10573  * Returns 0 on success, negative on failure
10574  **/
10575 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10576 {
10577         struct i40e_aq_get_phy_abilities_resp abilities;
10578         struct i40e_pf *pf;
10579         struct i40e_hw *hw;
10580         static u16 pfs_found;
10581         u16 wol_nvm_bits;
10582         u16 link_status;
10583         int err;
10584         u32 len;
10585         u32 val;
10586         u32 i;
10587         u8 set_fc_aq_fail;
10588
10589         err = pci_enable_device_mem(pdev);
10590         if (err)
10591                 return err;
10592
10593         /* set up for high or low dma */
10594         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10595         if (err) {
10596                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10597                 if (err) {
10598                         dev_err(&pdev->dev,
10599                                 "DMA configuration failed: 0x%x\n", err);
10600                         goto err_dma;
10601                 }
10602         }
10603
10604         /* set up pci connections */
10605         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
10606                                            IORESOURCE_MEM), i40e_driver_name);
10607         if (err) {
10608                 dev_info(&pdev->dev,
10609                          "pci_request_selected_regions failed %d\n", err);
10610                 goto err_pci_reg;
10611         }
10612
10613         pci_enable_pcie_error_reporting(pdev);
10614         pci_set_master(pdev);
10615
10616         /* Now that we have a PCI connection, we need to do the
10617          * low level device setup.  This is primarily setting up
10618          * the Admin Queue structures and then querying for the
10619          * device's current profile information.
10620          */
10621         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10622         if (!pf) {
10623                 err = -ENOMEM;
10624                 goto err_pf_alloc;
10625         }
10626         pf->next_vsi = 0;
10627         pf->pdev = pdev;
10628         set_bit(__I40E_DOWN, &pf->state);
10629
10630         hw = &pf->hw;
10631         hw->back = pf;
10632
10633         pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10634                                 I40E_MAX_CSR_SPACE);
10635
10636         hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10637         if (!hw->hw_addr) {
10638                 err = -EIO;
10639                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10640                          (unsigned int)pci_resource_start(pdev, 0),
10641                          pf->ioremap_len, err);
10642                 goto err_ioremap;
10643         }
10644         hw->vendor_id = pdev->vendor;
10645         hw->device_id = pdev->device;
10646         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10647         hw->subsystem_vendor_id = pdev->subsystem_vendor;
10648         hw->subsystem_device_id = pdev->subsystem_device;
10649         hw->bus.device = PCI_SLOT(pdev->devfn);
10650         hw->bus.func = PCI_FUNC(pdev->devfn);
10651         pf->instance = pfs_found;
10652
10653         if (debug != -1) {
10654                 pf->msg_enable = pf->hw.debug_mask;
10655                 pf->msg_enable = debug;
10656         }
10657
10658         /* do a special CORER for clearing PXE mode once at init */
10659         if (hw->revision_id == 0 &&
10660             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10661                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10662                 i40e_flush(hw);
10663                 msleep(200);
10664                 pf->corer_count++;
10665
10666                 i40e_clear_pxe_mode(hw);
10667         }
10668
10669         /* Reset here to make sure all is clean and to define PF 'n' */
10670         i40e_clear_hw(hw);
10671         err = i40e_pf_reset(hw);
10672         if (err) {
10673                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10674                 goto err_pf_reset;
10675         }
10676         pf->pfr_count++;
10677
10678         hw->aq.num_arq_entries = I40E_AQ_LEN;
10679         hw->aq.num_asq_entries = I40E_AQ_LEN;
10680         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10681         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10682         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10683
10684         snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10685                  "%s-%s:misc",
10686                  dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10687
10688         err = i40e_init_shared_code(hw);
10689         if (err) {
10690                 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10691                          err);
10692                 goto err_pf_reset;
10693         }
10694
10695         /* set up a default setting for link flow control */
10696         pf->hw.fc.requested_mode = I40E_FC_NONE;
10697
10698         /* set up the locks for the AQ, do this only once in probe
10699          * and destroy them only once in remove
10700          */
10701         mutex_init(&hw->aq.asq_mutex);
10702         mutex_init(&hw->aq.arq_mutex);
10703
10704         err = i40e_init_adminq(hw);
10705         if (err) {
10706                 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10707                         dev_info(&pdev->dev,
10708                                  "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10709                 else
10710                         dev_info(&pdev->dev,
10711                                  "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10712
10713                 goto err_pf_reset;
10714         }
10715
10716         /* provide nvm, fw, api versions */
10717         dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10718                  hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10719                  hw->aq.api_maj_ver, hw->aq.api_min_ver,
10720                  i40e_nvm_version_str(hw));
10721
10722         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10723             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10724                 dev_info(&pdev->dev,
10725                          "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10726         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10727                  hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10728                 dev_info(&pdev->dev,
10729                          "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10730
10731         i40e_verify_eeprom(pf);
10732
10733         /* Rev 0 hardware was never productized */
10734         if (hw->revision_id < 1)
10735                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10736
10737         i40e_clear_pxe_mode(hw);
10738         err = i40e_get_capabilities(pf);
10739         if (err)
10740                 goto err_adminq_setup;
10741
10742         err = i40e_sw_init(pf);
10743         if (err) {
10744                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10745                 goto err_sw_init;
10746         }
10747
10748         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10749                                 hw->func_caps.num_rx_qp,
10750                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10751         if (err) {
10752                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10753                 goto err_init_lan_hmc;
10754         }
10755
10756         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10757         if (err) {
10758                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10759                 err = -ENOENT;
10760                 goto err_configure_lan_hmc;
10761         }
10762
10763         /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10764          * Ignore error return codes because if it was already disabled via
10765          * hardware settings this will fail
10766          */
10767         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10768             (pf->hw.aq.fw_maj_ver < 4)) {
10769                 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10770                 i40e_aq_stop_lldp(hw, true, NULL);
10771         }
10772
10773         i40e_get_mac_addr(hw, hw->mac.addr);
10774         /* allow a platform config to override the HW addr */
10775         i40e_get_platform_mac_addr(pdev, pf);
10776         if (!is_valid_ether_addr(hw->mac.addr)) {
10777                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10778                 err = -EIO;
10779                 goto err_mac_addr;
10780         }
10781         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10782         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10783         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10784         if (is_valid_ether_addr(hw->mac.port_addr))
10785                 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10786 #ifdef I40E_FCOE
10787         err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10788         if (err)
10789                 dev_info(&pdev->dev,
10790                          "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10791         if (!is_valid_ether_addr(hw->mac.san_addr)) {
10792                 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10793                          hw->mac.san_addr);
10794                 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10795         }
10796         dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10797 #endif /* I40E_FCOE */
10798
10799         pci_set_drvdata(pdev, pf);
10800         pci_save_state(pdev);
10801 #ifdef CONFIG_I40E_DCB
10802         err = i40e_init_pf_dcb(pf);
10803         if (err) {
10804                 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10805                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10806                 /* Continue without DCB enabled */
10807         }
10808 #endif /* CONFIG_I40E_DCB */
10809
10810         /* set up periodic task facility */
10811         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10812         pf->service_timer_period = HZ;
10813
10814         INIT_WORK(&pf->service_task, i40e_service_task);
10815         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10816         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10817
10818         /* NVM bit on means WoL disabled for the port */
10819         i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
10820         if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
10821                 pf->wol_en = false;
10822         else
10823                 pf->wol_en = true;
10824         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10825
10826         /* set up the main switch operations */
10827         i40e_determine_queue_usage(pf);
10828         err = i40e_init_interrupt_scheme(pf);
10829         if (err)
10830                 goto err_switch_setup;
10831
10832         /* The number of VSIs reported by the FW is the minimum guaranteed
10833          * to us; HW supports far more and we share the remaining pool with
10834          * the other PFs. We allocate space for more than the guarantee with
10835          * the understanding that we might not get them all later.
10836          */
10837         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10838                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10839         else
10840                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10841
10842         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10843         len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10844         pf->vsi = kzalloc(len, GFP_KERNEL);
10845         if (!pf->vsi) {
10846                 err = -ENOMEM;
10847                 goto err_switch_setup;
10848         }
10849
10850 #ifdef CONFIG_PCI_IOV
10851         /* prep for VF support */
10852         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10853             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10854             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10855                 if (pci_num_vf(pdev))
10856                         pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10857         }
10858 #endif
10859         err = i40e_setup_pf_switch(pf, false);
10860         if (err) {
10861                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10862                 goto err_vsis;
10863         }
10864
10865         /* Make sure flow control is set according to current settings */
10866         err = i40e_set_fc(hw, &set_fc_aq_fail, true);
10867         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
10868                 dev_dbg(&pf->pdev->dev,
10869                         "Set fc with err %s aq_err %s on get_phy_cap\n",
10870                         i40e_stat_str(hw, err),
10871                         i40e_aq_str(hw, hw->aq.asq_last_status));
10872         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
10873                 dev_dbg(&pf->pdev->dev,
10874                         "Set fc with err %s aq_err %s on set_phy_config\n",
10875                         i40e_stat_str(hw, err),
10876                         i40e_aq_str(hw, hw->aq.asq_last_status));
10877         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
10878                 dev_dbg(&pf->pdev->dev,
10879                         "Set fc with err %s aq_err %s on get_link_info\n",
10880                         i40e_stat_str(hw, err),
10881                         i40e_aq_str(hw, hw->aq.asq_last_status));
10882
10883         /* if FDIR VSI was set up, start it now */
10884         for (i = 0; i < pf->num_alloc_vsi; i++) {
10885                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10886                         i40e_vsi_open(pf->vsi[i]);
10887                         break;
10888                 }
10889         }
10890
10891         /* driver is only interested in link up/down and module qualification
10892          * reports from firmware
10893          */
10894         err = i40e_aq_set_phy_int_mask(&pf->hw,
10895                                        I40E_AQ_EVENT_LINK_UPDOWN |
10896                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10897         if (err)
10898                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10899                          i40e_stat_str(&pf->hw, err),
10900                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10901
10902         /* Reconfigure hardware for allowing smaller MSS in the case
10903          * of TSO, so that we avoid the MDD being fired and causing
10904          * a reset in the case of small MSS+TSO.
10905          */
10906         val = rd32(hw, I40E_REG_MSS);
10907         if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10908                 val &= ~I40E_REG_MSS_MIN_MASK;
10909                 val |= I40E_64BYTE_MSS;
10910                 wr32(hw, I40E_REG_MSS, val);
10911         }
10912
10913         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10914             (pf->hw.aq.fw_maj_ver < 4)) {
10915                 msleep(75);
10916                 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10917                 if (err)
10918                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10919                                  i40e_stat_str(&pf->hw, err),
10920                                  i40e_aq_str(&pf->hw,
10921                                              pf->hw.aq.asq_last_status));
10922         }
10923         /* The main driver is (mostly) up and happy. We need to set this state
10924          * before setting up the misc vector or we get a race and the vector
10925          * ends up disabled forever.
10926          */
10927         clear_bit(__I40E_DOWN, &pf->state);
10928
10929         /* In case of MSIX we are going to setup the misc vector right here
10930          * to handle admin queue events etc. In case of legacy and MSI
10931          * the misc functionality and queue processing is combined in
10932          * the same vector and that gets setup at open.
10933          */
10934         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10935                 err = i40e_setup_misc_vector(pf);
10936                 if (err) {
10937                         dev_info(&pdev->dev,
10938                                  "setup of misc vector failed: %d\n", err);
10939                         goto err_vsis;
10940                 }
10941         }
10942
10943 #ifdef CONFIG_PCI_IOV
10944         /* prep for VF support */
10945         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10946             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10947             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10948                 u32 val;
10949
10950                 /* disable link interrupts for VFs */
10951                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10952                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10953                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10954                 i40e_flush(hw);
10955
10956                 if (pci_num_vf(pdev)) {
10957                         dev_info(&pdev->dev,
10958                                  "Active VFs found, allocating resources.\n");
10959                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10960                         if (err)
10961                                 dev_info(&pdev->dev,
10962                                          "Error %d allocating resources for existing VFs\n",
10963                                          err);
10964                 }
10965         }
10966 #endif /* CONFIG_PCI_IOV */
10967
10968         pfs_found++;
10969
10970         i40e_dbg_pf_init(pf);
10971
10972         /* tell the firmware that we're starting */
10973         i40e_send_version(pf);
10974
10975         /* since everything's happy, start the service_task timer */
10976         mod_timer(&pf->service_timer,
10977                   round_jiffies(jiffies + pf->service_timer_period));
10978
10979 #ifdef I40E_FCOE
10980         /* create FCoE interface */
10981         i40e_fcoe_vsi_setup(pf);
10982
10983 #endif
10984 #define PCI_SPEED_SIZE 8
10985 #define PCI_WIDTH_SIZE 8
10986         /* Devices on the IOSF bus do not have this information
10987          * and will report PCI Gen 1 x 1 by default so don't bother
10988          * checking them.
10989          */
10990         if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
10991                 char speed[PCI_SPEED_SIZE] = "Unknown";
10992                 char width[PCI_WIDTH_SIZE] = "Unknown";
10993
10994                 /* Get the negotiated link width and speed from PCI config
10995                  * space
10996                  */
10997                 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
10998                                           &link_status);
10999
11000                 i40e_set_pci_config_data(hw, link_status);
11001
11002                 switch (hw->bus.speed) {
11003                 case i40e_bus_speed_8000:
11004                         strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11005                 case i40e_bus_speed_5000:
11006                         strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11007                 case i40e_bus_speed_2500:
11008                         strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11009                 default:
11010                         break;
11011                 }
11012                 switch (hw->bus.width) {
11013                 case i40e_bus_width_pcie_x8:
11014                         strncpy(width, "8", PCI_WIDTH_SIZE); break;
11015                 case i40e_bus_width_pcie_x4:
11016                         strncpy(width, "4", PCI_WIDTH_SIZE); break;
11017                 case i40e_bus_width_pcie_x2:
11018                         strncpy(width, "2", PCI_WIDTH_SIZE); break;
11019                 case i40e_bus_width_pcie_x1:
11020                         strncpy(width, "1", PCI_WIDTH_SIZE); break;
11021                 default:
11022                         break;
11023                 }
11024
11025                 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11026                          speed, width);
11027
11028                 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11029                     hw->bus.speed < i40e_bus_speed_8000) {
11030                         dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11031                         dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11032                 }
11033         }
11034
11035         /* get the requested speeds from the fw */
11036         err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11037         if (err)
11038                 dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
11039                         i40e_stat_str(&pf->hw, err),
11040                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11041         pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11042
11043         /* get the supported phy types from the fw */
11044         err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11045         if (err)
11046                 dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
11047                         i40e_stat_str(&pf->hw, err),
11048                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11049         pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11050
11051         /* Add a filter to drop all Flow control frames from any VSI from being
11052          * transmitted. By doing so we stop a malicious VF from sending out
11053          * PAUSE or PFC frames and potentially controlling traffic for other
11054          * PF/VF VSIs.
11055          * The FW can still send Flow control frames if enabled.
11056          */
11057         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11058                                                        pf->main_vsi_seid);
11059
11060         /* print a string summarizing features */
11061         i40e_print_features(pf);
11062
11063         return 0;
11064
11065         /* Unwind what we've done if something failed in the setup */
11066 err_vsis:
11067         set_bit(__I40E_DOWN, &pf->state);
11068         i40e_clear_interrupt_scheme(pf);
11069         kfree(pf->vsi);
11070 err_switch_setup:
11071         i40e_reset_interrupt_capability(pf);
11072         del_timer_sync(&pf->service_timer);
11073 err_mac_addr:
11074 err_configure_lan_hmc:
11075         (void)i40e_shutdown_lan_hmc(hw);
11076 err_init_lan_hmc:
11077         kfree(pf->qp_pile);
11078 err_sw_init:
11079 err_adminq_setup:
11080         (void)i40e_shutdown_adminq(hw);
11081 err_pf_reset:
11082         iounmap(hw->hw_addr);
11083 err_ioremap:
11084         kfree(pf);
11085 err_pf_alloc:
11086         pci_disable_pcie_error_reporting(pdev);
11087         pci_release_selected_regions(pdev,
11088                                      pci_select_bars(pdev, IORESOURCE_MEM));
11089 err_pci_reg:
11090 err_dma:
11091         pci_disable_device(pdev);
11092         return err;
11093 }
11094
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown order matters: stop all scheduled work first, release VFs
 * and the switch tree, then the VSIs, and only then shut down the HMC
 * and admin queue before freeing driver memory and PCI resources.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	/* remove this PF's debug entries and stop PTP before teardown */
	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	wr32(hw, I40E_PFQF_HENA(0), 0);
	wr32(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* release any VFs before dismantling the switch */
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* shutdown and destroy the HMC */
	if (pf->hw.hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	/* the VEB structs themselves outlive the switch release above */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	/* free remaining driver-private allocations */
	kfree(pf->qp_pile);
	kfree(pf->vsi);

	/* finally release BAR mapping, the PF struct, and PCI resources */
	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
11195
11196 /**
11197  * i40e_pci_error_detected - warning that something funky happened in PCI land
11198  * @pdev: PCI device information struct
11199  *
11200  * Called to warn that something happened and the error handling steps
11201  * are in progress.  Allows the driver to quiesce things, be ready for
11202  * remediation.
11203  **/
11204 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11205                                                 enum pci_channel_state error)
11206 {
11207         struct i40e_pf *pf = pci_get_drvdata(pdev);
11208
11209         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11210
11211         /* shutdown all operations */
11212         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11213                 rtnl_lock();
11214                 i40e_prep_for_reset(pf);
11215                 rtnl_unlock();
11216         }
11217
11218         /* Request a slot reset */
11219         return PCI_ERS_RESULT_NEED_RESET;
11220 }
11221
11222 /**
11223  * i40e_pci_error_slot_reset - a PCI slot reset just happened
11224  * @pdev: PCI device information struct
11225  *
11226  * Called to find if the driver can work with the device now that
11227  * the pci slot has been reset.  If a basic connection seems good
11228  * (registers are readable and have sane content) then return a
11229  * happy little PCI_ERS_RESULT_xxx.
11230  **/
11231 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11232 {
11233         struct i40e_pf *pf = pci_get_drvdata(pdev);
11234         pci_ers_result_t result;
11235         int err;
11236         u32 reg;
11237
11238         dev_dbg(&pdev->dev, "%s\n", __func__);
11239         if (pci_enable_device_mem(pdev)) {
11240                 dev_info(&pdev->dev,
11241                          "Cannot re-enable PCI device after reset.\n");
11242                 result = PCI_ERS_RESULT_DISCONNECT;
11243         } else {
11244                 pci_set_master(pdev);
11245                 pci_restore_state(pdev);
11246                 pci_save_state(pdev);
11247                 pci_wake_from_d3(pdev, false);
11248
11249                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11250                 if (reg == 0)
11251                         result = PCI_ERS_RESULT_RECOVERED;
11252                 else
11253                         result = PCI_ERS_RESULT_DISCONNECT;
11254         }
11255
11256         err = pci_cleanup_aer_uncorrect_error_status(pdev);
11257         if (err) {
11258                 dev_info(&pdev->dev,
11259                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11260                          err);
11261                 /* non-fatal, continue */
11262         }
11263
11264         return result;
11265 }
11266
11267 /**
11268  * i40e_pci_error_resume - restart operations after PCI error recovery
11269  * @pdev: PCI device information struct
11270  *
11271  * Called to allow the driver to bring things back up after PCI error
11272  * and/or reset recovery has finished.
11273  **/
11274 static void i40e_pci_error_resume(struct pci_dev *pdev)
11275 {
11276         struct i40e_pf *pf = pci_get_drvdata(pdev);
11277
11278         dev_dbg(&pdev->dev, "%s\n", __func__);
11279         if (test_bit(__I40E_SUSPENDED, &pf->state))
11280                 return;
11281
11282         rtnl_lock();
11283         i40e_handle_reset_warning(pf);
11284         rtnl_unlock();
11285 }
11286
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 *
 * Quiesces the device, stops scheduled work, and programs the
 * Wake-on-LAN registers so the hardware behaves correctly across a
 * reboot or power-off.
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark the PF down so no further work gets scheduled */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* program the APM/WUFC wake registers per the user's WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* NOTE(review): the prep-for-reset and wake-register writes below
	 * repeat the ones above; presumably the second pass re-quiesces
	 * anything the service timer/task touched before they were stopped.
	 * Confirm whether the first pass is still required.
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	/* only drop to D3 when the system is actually powering off */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
11325
11326 #ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: requested PM transition (unused; device always goes to D3hot)
 *
 * Quiesces the device, arms the Wake-on-LAN registers according to the
 * user's WoL setting, and puts the device into D3hot.
 *
 * Returns 0 (this callback cannot fail).
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* flag the PF as suspended/down so nothing else runs */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	/* quiesce the device under RTNL, as the reset path expects */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm (or disarm) the APM/WUFC wake registers per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
11351
11352 /**
11353  * i40e_resume - PCI callback for waking up from D3
11354  * @pdev: PCI device information struct
11355  **/
11356 static int i40e_resume(struct pci_dev *pdev)
11357 {
11358         struct i40e_pf *pf = pci_get_drvdata(pdev);
11359         u32 err;
11360
11361         pci_set_power_state(pdev, PCI_D0);
11362         pci_restore_state(pdev);
11363         /* pci_restore_state() clears dev->state_saves, so
11364          * call pci_save_state() again to restore it.
11365          */
11366         pci_save_state(pdev);
11367
11368         err = pci_enable_device_mem(pdev);
11369         if (err) {
11370                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11371                 return err;
11372         }
11373         pci_set_master(pdev);
11374
11375         /* no wakeup events while running */
11376         pci_wake_from_d3(pdev, false);
11377
11378         /* handling the reset will rebuild the device state */
11379         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11380                 clear_bit(__I40E_DOWN, &pf->state);
11381                 rtnl_lock();
11382                 i40e_reset_and_rebuild(pf, false);
11383                 rtnl_unlock();
11384         }
11385
11386         return 0;
11387 }
11388
11389 #endif
/* PCI AER (Advanced Error Reporting) recovery hooks */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
11395
/* PCI driver glue: probe/remove, legacy power management, shutdown,
 * AER error handling, and SR-IOV configuration entry points for all
 * devices matched by i40e_pci_tbl.
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
11409
11410 /**
11411  * i40e_init_module - Driver registration routine
11412  *
11413  * i40e_init_module is the first routine called when the driver is
11414  * loaded. All it does is register with the PCI subsystem.
11415  **/
11416 static int __init i40e_init_module(void)
11417 {
11418         pr_info("%s: %s - version %s\n", i40e_driver_name,
11419                 i40e_driver_string, i40e_driver_version_str);
11420         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11421
11422         i40e_dbg_init();
11423         return pci_register_driver(&i40e_driver);
11424 }
11425 module_init(i40e_init_module);
11426
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.  Unregisters the PCI driver first, then removes the
 * debugfs root created at module init.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);