]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/intel/i40e/i40e_main.c
i40e: When searching all MAC/VLAN filters, ignore removed filters
[karo-tx-linux.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2016 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30
31 /* Local includes */
32 #include "i40e.h"
33 #include "i40e_diag.h"
34 #include <net/udp_tunnel.h>
35
36 const char i40e_driver_name[] = "i40e";
37 static const char i40e_driver_string[] =
38                         "Intel(R) Ethernet Connection XL710 Network Driver";
39
40 #define DRV_KERN "-k"
41
42 #define DRV_VERSION_MAJOR 1
43 #define DRV_VERSION_MINOR 6
44 #define DRV_VERSION_BUILD 21
45 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
46              __stringify(DRV_VERSION_MINOR) "." \
47              __stringify(DRV_VERSION_BUILD)    DRV_KERN
48 const char i40e_driver_version_str[] = DRV_VERSION;
49 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
50
51 /* a bit of forward declarations */
52 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
53 static void i40e_handle_reset_warning(struct i40e_pf *pf);
54 static int i40e_add_vsi(struct i40e_vsi *vsi);
55 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
56 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
57 static int i40e_setup_misc_vector(struct i40e_pf *pf);
58 static void i40e_determine_queue_usage(struct i40e_pf *pf);
59 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
60 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
61 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
62
63 /* i40e_pci_tbl - PCI Device ID Table
64  *
65  * Last entry must be all 0s
66  *
67  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
68  *   Class, Class Mask, private data (not used) }
69  */
70 static const struct pci_device_id i40e_pci_tbl[] = {
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
78         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
79         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
80         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
81         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
82         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
83         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
84         {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
85         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
86         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
87         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
88         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
89         /* required last entry */
90         {0, }
91 };
92 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
93
94 #define I40E_MAX_VF_COUNT 128
95 static int debug = -1;
96 module_param(debug, uint, 0);
97 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
98
99 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
100 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
101 MODULE_LICENSE("GPL");
102 MODULE_VERSION(DRV_VERSION);
103
104 static struct workqueue_struct *i40e_wq;
105
106 /**
107  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
108  * @hw:   pointer to the HW structure
109  * @mem:  ptr to mem struct to fill out
110  * @size: size of memory requested
111  * @alignment: what to align the allocation to
112  **/
113 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
114                             u64 size, u32 alignment)
115 {
116         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
117
118         mem->size = ALIGN(size, alignment);
119         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
120                                       &mem->pa, GFP_KERNEL);
121         if (!mem->va)
122                 return -ENOMEM;
123
124         return 0;
125 }
126
127 /**
128  * i40e_free_dma_mem_d - OS specific memory free for shared code
129  * @hw:   pointer to the HW structure
130  * @mem:  ptr to mem struct to free
131  **/
132 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
133 {
134         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
135
136         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
137         mem->va = NULL;
138         mem->pa = 0;
139         mem->size = 0;
140
141         return 0;
142 }
143
144 /**
145  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
146  * @hw:   pointer to the HW structure
147  * @mem:  ptr to mem struct to fill out
148  * @size: size of memory requested
149  **/
150 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
151                              u32 size)
152 {
153         mem->size = size;
154         mem->va = kzalloc(size, GFP_KERNEL);
155
156         if (!mem->va)
157                 return -ENOMEM;
158
159         return 0;
160 }
161
162 /**
163  * i40e_free_virt_mem_d - OS specific memory free for shared code
164  * @hw:   pointer to the HW structure
165  * @mem:  ptr to mem struct to free
166  **/
167 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
168 {
169         /* it's ok to kfree a NULL pointer */
170         kfree(mem->va);
171         mem->va = NULL;
172         mem->size = 0;
173
174         return 0;
175 }
176
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	/* reject a NULL pile, an empty request, or an id that would
	 * collide with the in-use marker bit stored in each list entry
	 */
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor
			 * by tagging each entry with the owner id
			 */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			/* next search can resume just past this lump */
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
233
234 /**
235  * i40e_put_lump - return a lump of generic resource
236  * @pile: the pile of resource to search
237  * @index: the base item index
238  * @id: the owner id of the items assigned
239  *
240  * Returns the count of items in the lump
241  **/
242 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
243 {
244         int valid_id = (id | I40E_PILE_VALID_BIT);
245         int count = 0;
246         int i;
247
248         if (!pile || index >= pile->num_entries)
249                 return -EINVAL;
250
251         for (i = index;
252              i < pile->num_entries && pile->list[i] == valid_id;
253              i++) {
254                 pile->list[i] = 0;
255                 count++;
256         }
257
258         if (count && index < pile->search_hint)
259                 pile->search_hint = index;
260
261         return count;
262 }
263
264 /**
265  * i40e_find_vsi_from_id - searches for the vsi with the given id
266  * @pf - the pf structure to search for the vsi
267  * @id - id of the vsi it is searching for
268  **/
269 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
270 {
271         int i;
272
273         for (i = 0; i < pf->num_alloc_vsi; i++)
274                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
275                         return pf->vsi[i];
276
277         return NULL;
278 }
279
280 /**
281  * i40e_service_event_schedule - Schedule the service task to wake up
282  * @pf: board private structure
283  *
284  * If not already scheduled, this puts the task into the work queue
285  **/
286 void i40e_service_event_schedule(struct i40e_pf *pf)
287 {
288         if (!test_bit(__I40E_DOWN, &pf->state) &&
289             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
290             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
291                 queue_work(i40e_wq, &pf->service_task);
292 }
293
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* after 20s of quiet, restart the escalation from level 1;
	 * within one watchdog period of the last recovery, do nothing new
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* escalate each successive timeout: PF reset, then core reset,
	 * then global reset; beyond that just report failure
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	/* the service task performs the requested reset */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
391
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* simple accessor; callers must not free the returned pointer */
	return &vsi->net_stats;
}
403
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* bail with whatever is already in *stats while the VSI is down
	 * or its rings have not been allocated yet
	 */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* seqcount retry loop gives a consistent 64-bit snapshot
		 * even on 32-bit hosts
		 */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* NOTE(review): assumes the rx ring is laid out immediately
		 * after its tx partner in the same allocation — confirm
		 * against the ring setup code before changing ring storage
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
474
475 /**
476  * i40e_vsi_reset_stats - Resets all stats of the given vsi
477  * @vsi: the VSI to have its stats reset
478  **/
479 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
480 {
481         struct rtnl_link_stats64 *ns;
482         int i;
483
484         if (!vsi)
485                 return;
486
487         ns = i40e_get_vsi_stats_struct(vsi);
488         memset(ns, 0, sizeof(*ns));
489         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
490         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
491         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
492         if (vsi->rx_rings && vsi->rx_rings[0]) {
493                 for (i = 0; i < vsi->num_queue_pairs; i++) {
494                         memset(&vsi->rx_rings[i]->stats, 0,
495                                sizeof(vsi->rx_rings[i]->stats));
496                         memset(&vsi->rx_rings[i]->rx_stats, 0,
497                                sizeof(vsi->rx_rings[i]->rx_stats));
498                         memset(&vsi->tx_rings[i]->stats, 0,
499                                sizeof(vsi->tx_rings[i]->stats));
500                         memset(&vsi->tx_rings[i]->tx_stats, 0,
501                                sizeof(vsi->tx_rings[i]->tx_stats));
502                 }
503         }
504         vsi->stat_offsets_loaded = false;
505 }
506
507 /**
508  * i40e_pf_reset_stats - Reset all of the stats for the given PF
509  * @pf: the PF to be reset
510  **/
511 void i40e_pf_reset_stats(struct i40e_pf *pf)
512 {
513         int i;
514
515         memset(&pf->stats, 0, sizeof(pf->stats));
516         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
517         pf->stat_offsets_loaded = false;
518
519         for (i = 0; i < I40E_MAX_VEB; i++) {
520                 if (pf->veb[i]) {
521                         memset(&pf->veb[i]->stats, 0,
522                                sizeof(pf->veb[i]->stats));
523                         memset(&pf->veb[i]->stats_offsets, 0,
524                                sizeof(pf->veb[i]->stats_offsets));
525                         pf->veb[i]->stat_offsets_loaded = false;
526                 }
527         }
528         pf->hw_csum_rx_error = 0;
529 }
530
531 /**
532  * i40e_stat_update48 - read and update a 48 bit stat from the chip
533  * @hw: ptr to the hardware info
534  * @hireg: the high 32 bit reg to read
535  * @loreg: the low 32 bit reg to read
536  * @offset_loaded: has the initial offset been loaded yet
537  * @offset: ptr to current offset value
538  * @stat: ptr to the stat
539  *
540  * Since the device stats are not reset at PFReset, they likely will not
541  * be zeroed when the driver starts.  We'll save the first values read
542  * and use them as offsets to be subtracted from the raw values in order
543  * to report stats that count from zero.  In the process, we also manage
544  * the potential roll-over.
545  **/
546 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
547                                bool offset_loaded, u64 *offset, u64 *stat)
548 {
549         u64 new_data;
550
551         if (hw->device_id == I40E_DEV_ID_QEMU) {
552                 new_data = rd32(hw, loreg);
553                 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
554         } else {
555                 new_data = rd64(hw, loreg);
556         }
557         if (!offset_loaded)
558                 *offset = new_data;
559         if (likely(new_data >= *offset))
560                 *stat = new_data - *offset;
561         else
562                 *stat = (new_data + BIT_ULL(48)) - *offset;
563         *stat &= 0xFFFFFFFFFFFFULL;
564 }
565
566 /**
567  * i40e_stat_update32 - read and update a 32 bit stat from the chip
568  * @hw: ptr to the hardware info
569  * @reg: the hw reg to read
570  * @offset_loaded: has the initial offset been loaded yet
571  * @offset: ptr to current offset value
572  * @stat: ptr to the stat
573  **/
574 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
575                                bool offset_loaded, u64 *offset, u64 *stat)
576 {
577         u32 new_data;
578
579         new_data = rd32(hw, reg);
580         if (!offset_loaded)
581                 *offset = new_data;
582         if (likely(new_data >= *offset))
583                 *stat = (u32)(new_data - *offset);
584         else
585                 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
586 }
587
588 /**
589  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
590  * @vsi: the VSI to be updated
591  **/
592 void i40e_update_eth_stats(struct i40e_vsi *vsi)
593 {
594         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
595         struct i40e_pf *pf = vsi->back;
596         struct i40e_hw *hw = &pf->hw;
597         struct i40e_eth_stats *oes;
598         struct i40e_eth_stats *es;     /* device's eth stats */
599
600         es = &vsi->eth_stats;
601         oes = &vsi->eth_stats_offsets;
602
603         /* Gather up the stats that the hw collects */
604         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
605                            vsi->stat_offsets_loaded,
606                            &oes->tx_errors, &es->tx_errors);
607         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
608                            vsi->stat_offsets_loaded,
609                            &oes->rx_discards, &es->rx_discards);
610         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
611                            vsi->stat_offsets_loaded,
612                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
613         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
614                            vsi->stat_offsets_loaded,
615                            &oes->tx_errors, &es->tx_errors);
616
617         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
618                            I40E_GLV_GORCL(stat_idx),
619                            vsi->stat_offsets_loaded,
620                            &oes->rx_bytes, &es->rx_bytes);
621         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
622                            I40E_GLV_UPRCL(stat_idx),
623                            vsi->stat_offsets_loaded,
624                            &oes->rx_unicast, &es->rx_unicast);
625         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
626                            I40E_GLV_MPRCL(stat_idx),
627                            vsi->stat_offsets_loaded,
628                            &oes->rx_multicast, &es->rx_multicast);
629         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
630                            I40E_GLV_BPRCL(stat_idx),
631                            vsi->stat_offsets_loaded,
632                            &oes->rx_broadcast, &es->rx_broadcast);
633
634         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
635                            I40E_GLV_GOTCL(stat_idx),
636                            vsi->stat_offsets_loaded,
637                            &oes->tx_bytes, &es->tx_bytes);
638         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
639                            I40E_GLV_UPTCL(stat_idx),
640                            vsi->stat_offsets_loaded,
641                            &oes->tx_unicast, &es->tx_unicast);
642         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
643                            I40E_GLV_MPTCL(stat_idx),
644                            vsi->stat_offsets_loaded,
645                            &oes->tx_multicast, &es->tx_multicast);
646         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
647                            I40E_GLV_BPTCL(stat_idx),
648                            vsi->stat_offsets_loaded,
649                            &oes->tx_broadcast, &es->tx_broadcast);
650         vsi->stat_offsets_loaded = true;
651 }
652
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Reads the VEB's aggregate and per-traffic-class counters from the
 * hardware and folds them into the cached stats via the saved offsets.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* the RUPP register is only valid on B0 and later silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	/* 48-bit rx byte/packet counters */
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* 48-bit tx byte/packet counters */
	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per-traffic-class packet and byte counters, both directions */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	/* offsets are now baselined; future updates report deltas */
	veb->stat_offsets_loaded = true;
}
731
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * Reads the PF's FCoE counters from the hardware and folds them into
 * the VSI's cached fcoe_stats via the saved offsets.  No-op for any
 * VSI that is not the FCoE VSI.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* FCoE stat registers are indexed by PF with a fixed offset */
	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	/* offsets are now baselined; future updates report deltas */
	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
781 /**
782  * i40e_update_vsi_stats - Update the vsi statistics counters.
783  * @vsi: the VSI to be updated
784  *
785  * There are a few instances where we store the same stat in a
786  * couple of different structs.  This is partly because we have
787  * the netdev stats that need to be filled out, which is slightly
788  * different from the "eth_stats" defined by the chip and used in
789  * VF communications.  We sort it out here.
790  **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* Don't touch the rings while the VSI is down or a reconfiguration
	 * is in flight; the ring pointers may be in transition.
	 */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	/* RCU protects the ring pointers against a concurrent teardown */
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);
		/* NOTE(review): no NULL check on the ring pointer here —
		 * presumably the DOWN/CONFIG_BUSY test above guarantees the
		 * rings are allocated; confirm against ring setup/teardown.
		 */

		/* u64_stats seqcount retry loop: re-read until we get a
		 * consistent packets/bytes snapshot on 32-bit hosts.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	/* Publish the aggregated per-ring totals on the VSI */
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
890
891 /**
892  * i40e_update_pf_stats - Update the PF statistics counters.
893  * @pf: the PF to be updated
894  **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* Port-level ethernet byte/packet counters.  Every
	 * i40e_stat_update* call below accumulates the delta between the
	 * current hardware register and the offset saved in osd; on the
	 * very first pass (stat_offsets_loaded == false) it only latches
	 * the baseline.
	 */
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	/* Error counters */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Link-level flow control (XON/XOFF) counters */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* Per-priority (PFC) flow control counters, 8 priorities */
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	/* Rx packet-size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Tx packet-size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Rx framing error counters */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	/* Energy Efficient Ethernet LPI status bits and event counters */
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* Flow director sideband/ATR status: enabled and not auto-disabled */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	/* Subsequent calls will compute deltas against the saved offsets */
	pf->stat_offsets_loaded = true;
}
1123
1124 /**
1125  * i40e_update_stats - Update the various statistics counters.
1126  * @vsi: the VSI to be updated
1127  *
1128  * Update the various stats for this VSI and its related entities.
1129  **/
1130 void i40e_update_stats(struct i40e_vsi *vsi)
1131 {
1132         struct i40e_pf *pf = vsi->back;
1133
1134         if (vsi == pf->vsi[pf->lan_vsi])
1135                 i40e_update_pf_stats(pf);
1136
1137         i40e_update_vsi_stats(vsi);
1138 #ifdef I40E_FCOE
1139         i40e_update_fcoe_stats(vsi);
1140 #endif
1141 }
1142
1143 /**
1144  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1145  * @vsi: the VSI to be searched
1146  * @macaddr: the MAC address
1147  * @vlan: the vlan
1148  *
1149  * Returns ptr to the filter object or NULL
1150  **/
1151 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1152                                                 const u8 *macaddr, s16 vlan)
1153 {
1154         struct i40e_mac_filter *f;
1155
1156         if (!vsi || !macaddr)
1157                 return NULL;
1158
1159         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1160                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1161                     (vlan == f->vlan))
1162                         return f;
1163         }
1164         return NULL;
1165 }
1166
1167 /**
1168  * i40e_find_mac - Find a mac addr in the macvlan filters list
1169  * @vsi: the VSI to be searched
1170  * @macaddr: the MAC address we are searching for
1171  *
1172  * Returns the first filter with the provided MAC address or NULL if
1173  * MAC address was not found
1174  **/
1175 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1176 {
1177         struct i40e_mac_filter *f;
1178
1179         if (!vsi || !macaddr)
1180                 return NULL;
1181
1182         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1183                 if ((ether_addr_equal(macaddr, f->macaddr)))
1184                         return f;
1185         }
1186         return NULL;
1187 }
1188
1189 /**
1190  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1191  * @vsi: the VSI to be searched
1192  *
1193  * Returns true if VSI is in vlan mode or false otherwise
1194  **/
1195 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1196 {
1197         struct i40e_mac_filter *f;
1198
1199         /* Only -1 for all the filters denotes not in vlan mode
1200          * so we have to go through all the list in order to make sure
1201          */
1202         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1203                 if (f->vlan >= 0 || vsi->info.pvid)
1204                         return true;
1205         }
1206
1207         return false;
1208 }
1209
1210 /**
1211  * i40e_add_filter - Add a mac/vlan filter to the VSI
1212  * @vsi: the VSI to be searched
1213  * @macaddr: the MAC address
1214  * @vlan: the vlan
1215  *
1216  * Returns ptr to the filter object or NULL when no memory available.
1217  *
1218  * NOTE: This function is expected to be called with mac_filter_list_lock
1219  * being held.
1220  **/
1221 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1222                                         const u8 *macaddr, s16 vlan)
1223 {
1224         struct i40e_mac_filter *f;
1225
1226         if (!vsi || !macaddr)
1227                 return NULL;
1228
1229         /* Do not allow broadcast filter to be added since broadcast filter
1230          * is added as part of add VSI for any newly created VSI except
1231          * FDIR VSI
1232          */
1233         if (is_broadcast_ether_addr(macaddr))
1234                 return NULL;
1235
1236         f = i40e_find_filter(vsi, macaddr, vlan);
1237         if (!f) {
1238                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1239                 if (!f)
1240                         return NULL;
1241
1242                 ether_addr_copy(f->macaddr, macaddr);
1243                 f->vlan = vlan;
1244                 /* If we're in overflow promisc mode, set the state directly
1245                  * to failed, so we don't bother to try sending the filter
1246                  * to the hardware.
1247                  */
1248                 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
1249                         f->state = I40E_FILTER_FAILED;
1250                 else
1251                         f->state = I40E_FILTER_NEW;
1252                 INIT_LIST_HEAD(&f->list);
1253                 list_add_tail(&f->list, &vsi->mac_filter_list);
1254
1255                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1256                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1257         }
1258
1259         /* If we're asked to add a filter that has been marked for removal, it
1260          * is safe to simply restore it to active state. __i40e_del_filter
1261          * will have simply deleted any filters which were previously marked
1262          * NEW or FAILED, so if it is currently marked REMOVE it must have
1263          * previously been ACTIVE. Since we haven't yet run the sync filters
1264          * task, just restore this filter to the ACTIVE state so that the
1265          * sync task leaves it in place
1266          */
1267         if (f->state == I40E_FILTER_REMOVE)
1268                 f->state = I40E_FILTER_ACTIVE;
1269
1270         return f;
1271 }
1272
1273 /**
1274  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1275  * @vsi: the VSI to be searched
1276  * @macaddr: the MAC address
1277  * @vlan: the vlan
1278  *
1279  * NOTE: This function is expected to be called with mac_filter_list_lock
1280  * being held.
1281  * ANOTHER NOTE: This function MUST be called from within the context of
1282  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1283  * instead of list_for_each_entry().
1284  **/
1285 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1286 {
1287         struct i40e_mac_filter *f;
1288
1289         if (!vsi || !macaddr)
1290                 return;
1291
1292         f = i40e_find_filter(vsi, macaddr, vlan);
1293         if (!f)
1294                 return;
1295
1296         if ((f->state == I40E_FILTER_FAILED) ||
1297             (f->state == I40E_FILTER_NEW)) {
1298                 /* this one never got added by the FW. Just remove it,
1299                  * no need to sync anything.
1300                  */
1301                 list_del(&f->list);
1302                 kfree(f);
1303         } else {
1304                 f->state = I40E_FILTER_REMOVE;
1305                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1306                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1307         }
1308 }
1309
1310 /**
1311  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1312  * @vsi: the VSI to be searched
1313  * @macaddr: the mac address to be filtered
1314  *
1315  * Goes through all the macvlan filters and adds a macvlan filter for each
1316  * unique vlan that already exists. If a PVID has been assigned, instead only
1317  * add the macaddr to that VLAN.
1318  *
1319  * Returns last filter added on success, else NULL
1320  **/
1321 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
1322                                              const u8 *macaddr)
1323 {
1324         struct i40e_mac_filter *f, *add = NULL;
1325
1326         if (vsi->info.pvid)
1327                 return i40e_add_filter(vsi, macaddr,
1328                                        le16_to_cpu(vsi->info.pvid));
1329
1330         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1331                 if (f->state == I40E_FILTER_REMOVE)
1332                         continue;
1333                 add = i40e_add_filter(vsi, macaddr, f->vlan);
1334                 if (!add)
1335                         return NULL;
1336         }
1337
1338         return add;
1339 }
1340
1341 /**
1342  * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1343  * @vsi: the VSI to be searched
1344  * @macaddr: the mac address to be removed
1345  *
1346  * Removes a given MAC address from a VSI, regardless of VLAN
1347  *
1348  * Returns 0 for success, or error
1349  **/
1350 int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
1351 {
1352         struct i40e_mac_filter *f = NULL;
1353         int changed = 0;
1354
1355         WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1356              "Missing mac_filter_list_lock\n");
1357         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1358                 if (ether_addr_equal(macaddr, f->macaddr))
1359                         f->state = I40E_FILTER_REMOVE;
1360         }
1361         if (changed) {
1362                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1363                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1364                 return 0;
1365         }
1366         return -ENOENT;
1367 }
1368
1369 /**
1370  * i40e_set_mac - NDO callback to set mac address
1371  * @netdev: network interface device structure
1372  * @p: pointer to an address structure
1373  *
1374  * Returns 0 on success, negative on failure
1375  **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Nothing to do if the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	/* Refuse the change while the device is down or resetting */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Swap the filters: drop the old MAC from every VLAN, then add
	 * the new one, all under the filter list lock.
	 */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
	i40e_put_mac_in_vlan(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		/* Tell firmware the new locally-administered address so
		 * Wake-on-LAN keeps working; a failure here is logged but
		 * deliberately non-fatal.
		 */
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
1430
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.  Fills in the
 * per-TC qoffset/qcount/netdev_tc bookkeeping in @vsi->tc_config, the
 * tc_mapping and queue_mapping words in @ctxt->info, and sets
 * vsi->num_queue_pairs to the total number of queue pairs consumed.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	/* enabled_tc == 0 still means "TC0 only" for the non-DCB path */
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	/* split the allocated pairs evenly, capped by the HW per-TC limit */
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				/* main VSI is also bounded by the RSS size */
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				/* these VSI types support a single TC only */
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			/* HW wants the queue count encoded as log2 in qmap */
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		/* single-TC main VSI: honour a user-requested queue count,
		 * otherwise use one pair per LAN MSI-X vector
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SR-IOV VSIs list every absolute queue id individually */
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		/* contiguous mapping: only the base queue is needed */
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1572
1573 /**
1574  * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1575  * @netdev: the netdevice
1576  * @addr: address to add
1577  *
1578  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1579  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1580  */
1581 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1582 {
1583         struct i40e_netdev_priv *np = netdev_priv(netdev);
1584         struct i40e_vsi *vsi = np->vsi;
1585         struct i40e_mac_filter *f;
1586
1587         if (i40e_is_vsi_in_vlan(vsi))
1588                 f = i40e_put_mac_in_vlan(vsi, addr);
1589         else
1590                 f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
1591
1592         if (f)
1593                 return 0;
1594         else
1595                 return -ENOMEM;
1596 }
1597
1598 /**
1599  * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1600  * @netdev: the netdevice
1601  * @addr: address to add
1602  *
1603  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1604  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1605  */
1606 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1607 {
1608         struct i40e_netdev_priv *np = netdev_priv(netdev);
1609         struct i40e_vsi *vsi = np->vsi;
1610
1611         if (i40e_is_vsi_in_vlan(vsi))
1612                 i40e_del_mac_all_vlan(vsi, addr);
1613         else
1614                 i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
1615
1616         return 0;
1617 }
1618
1619 /**
1620  * i40e_set_rx_mode - NDO callback to set the netdev filters
1621  * @netdev: network interface device structure
1622  **/
1623 #ifdef I40E_FCOE
1624 void i40e_set_rx_mode(struct net_device *netdev)
1625 #else
1626 static void i40e_set_rx_mode(struct net_device *netdev)
1627 #endif
1628 {
1629         struct i40e_netdev_priv *np = netdev_priv(netdev);
1630         struct i40e_vsi *vsi = np->vsi;
1631
1632         spin_lock_bh(&vsi->mac_filter_list_lock);
1633
1634         __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1635         __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1636
1637         spin_unlock_bh(&vsi->mac_filter_list_lock);
1638
1639         /* check for other flag changes */
1640         if (vsi->current_netdev_flags != vsi->netdev->flags) {
1641                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1642                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1643         }
1644
1645         /* schedule our worker thread which will take care of
1646          * applying the new filter changes
1647          */
1648         i40e_service_event_schedule(vsi->back);
1649 }
1650
1651 /**
1652  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1653  * @vsi: pointer to vsi struct
1654  * @from: Pointer to list which contains MAC filter entries - changes to
1655  *        those entries needs to be undone.
1656  *
1657  * MAC filter entries from list were slated to be removed from device.
1658  **/
1659 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1660                                          struct list_head *from)
1661 {
1662         struct i40e_mac_filter *f, *ftmp;
1663
1664         list_for_each_entry_safe(f, ftmp, from, list) {
1665                 /* Move the element back into MAC filter list*/
1666                 list_move_tail(&f->list, &vsi->mac_filter_list);
1667         }
1668 }
1669
1670 /**
1671  * i40e_update_filter_state - Update filter state based on return data
1672  * from firmware
1673  * @count: Number of filters added
1674  * @add_list: return data from fw
1675  * @head: pointer to first filter in current batch
1676  * @aq_err: status from fw
1677  *
1678  * MAC filter entries from list were slated to be added to device. Returns
1679  * number of successful filters. Note that 0 does NOT mean success!
1680  **/
1681 static int
1682 i40e_update_filter_state(int count,
1683                          struct i40e_aqc_add_macvlan_element_data *add_list,
1684                          struct i40e_mac_filter *add_head, int aq_err)
1685 {
1686         int retval = 0;
1687         int i;
1688
1689
1690         if (!aq_err) {
1691                 retval = count;
1692                 /* Everything's good, mark all filters active. */
1693                 for (i = 0; i < count ; i++) {
1694                         add_head->state = I40E_FILTER_ACTIVE;
1695                         add_head = list_next_entry(add_head, list);
1696                 }
1697         } else if (aq_err == I40E_AQ_RC_ENOSPC) {
1698                 /* Device ran out of filter space. Check the return value
1699                  * for each filter to see which ones are active.
1700                  */
1701                 for (i = 0; i < count ; i++) {
1702                         if (add_list[i].match_method ==
1703                             I40E_AQC_MM_ERR_NO_RES) {
1704                                 add_head->state = I40E_FILTER_FAILED;
1705                         } else {
1706                                 add_head->state = I40E_FILTER_ACTIVE;
1707                                 retval++;
1708                         }
1709                         add_head = list_next_entry(add_head, list);
1710                 }
1711         } else {
1712                 /* Some other horrible thing happened, fail all filters */
1713                 retval = 0;
1714                 for (i = 0; i < count ; i++) {
1715                         add_head->state = I40E_FILTER_FAILED;
1716                         add_head = list_next_entry(add_head, list);
1717                 }
1718         }
1719         return retval;
1720 }
1721
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Serializes against itself via the __I40E_CONFIG_BUSY bit.  Filters
 * marked REMOVE/NEW are batched into AdminQ del/add commands; if adds
 * fail for lack of HW filter space the VSI is forced into overflow
 * promiscuous mode.  Also applies IFF_ALLMULTI/IFF_PROMISC changes.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
	struct list_head tmp_add_list, tmp_del_list;
	struct i40e_hw *hw = &vsi->back->hw;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	int retval = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_err = 0;
	u16 cmd_flags;
	int list_size;
	int fcnt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	/* only one sync may run per VSI at a time; busy-wait for our turn */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	/* snapshot netdev flag changes since the last sync */
	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* vsi_name is only used to label log messages */
	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		/* pull pending changes onto temp lists under the lock so the
		 * AdminQ calls below can run without holding it
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		/* Create a list of filters to delete. */
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				list_move_tail(&f->list, &tmp_del_list);
				vsi->active_filters--;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Move the element into temporary add_list */
				list_move_tail(&f->list, &tmp_add_list);
			}
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!list_empty(&tmp_del_list)) {
		/* batch as many elements as fit in one AdminQ buffer */
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list) {
			/* Undo VSI's MAC filter entry element updates */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
								del_list,
								num_del, NULL);
				aq_err = hw->aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, list_size);

				/* Explicitly ignore and do not report when
				 * firmware returns ENOENT.
				 */
				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
					retval = -EIO;
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		/* flush the final partially-filled buffer, if any */
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
							num_del, NULL);
			aq_err = hw->aq.asq_last_status;
			num_del = 0;

			/* Explicitly ignore and do not report when firmware
			 * returns ENOENT.
			 */
			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
				retval = -EIO;
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw, aq_err));
			}
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list) {
			retval = -ENOMEM;
			goto out;
		}
		num_add = 0;
		list_for_each_entry(f, &tmp_add_list, list) {
			/* already in overflow promisc: no point adding more */
			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				     &vsi->state)) {
				f->state = I40E_FILTER_FAILED;
				continue;
			}
			/* add to add array */
			if (num_add == 0)
				add_head = f;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}
			add_list[num_add].queue_number = 0;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				aq_err = hw->aq.asq_last_status;
				fcnt = i40e_update_filter_state(num_add,
								add_list,
								add_head,
								aq_ret);
				vsi->active_filters += fcnt;

				/* any shortfall means HW filter space ran
				 * out: fall back to promiscuous mode
				 */
				if (fcnt != num_add) {
					promisc_changed = true;
					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
						&vsi->state);
					vsi->promisc_threshold =
						(vsi->active_filters * 3) / 4;
					dev_warn(&pf->pdev->dev,
						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
						 i40e_aq_str(hw, aq_err),
						 vsi_name);
				}
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		/* flush the final partially-filled buffer, if any */
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
						     add_list, num_add, NULL);
			aq_err = hw->aq.asq_last_status;
			fcnt = i40e_update_filter_state(num_add, add_list,
							add_head, aq_ret);
			vsi->active_filters += fcnt;
			if (fcnt != num_add) {
				promisc_changed = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				vsi->promisc_threshold =
						(vsi->active_filters * 3) / 4;
				dev_warn(&pf->pdev->dev,
					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
					 i40e_aq_str(hw, aq_err), vsi_name);
			}
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
			list_move_tail(&f->list, &vsi->mac_filter_list);
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Check to see if we can drop out of overflow promiscuous mode. */
	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		int failed_count = 0;
		/* See if we have any failed filters. We can't drop out of
		 * promiscuous until these have all been deleted.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_FAILED)
				failed_count++;
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		if (!failed_count) {
			dev_info(&pf->pdev->dev,
				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
				 vsi_name);
			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
			promisc_changed = true;
			vsi->promisc_threshold = 0;
		}
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
	/* promisc needs updating when either the netdev flag flipped or
	 * the overflow state changed above
	 */
	if ((changed_flags & IFF_PROMISC) ||
	    (promisc_changed &&
	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return retval;
}
2106
2107 /**
2108  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2109  * @pf: board private structure
2110  **/
2111 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2112 {
2113         int v;
2114
2115         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2116                 return;
2117         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2118
2119         for (v = 0; v < pf->num_alloc_vsi; v++) {
2120                 if (pf->vsi[v] &&
2121                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2122                         int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2123
2124                         if (ret) {
2125                                 /* come back and try again later */
2126                                 pf->flags |= I40E_FLAG_FILTER_SYNC;
2127                                 break;
2128                         }
2129                 }
2130         }
2131 }
2132
2133 /**
2134  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2135  * @netdev: network interface device structure
2136  * @new_mtu: new value for maximum frame size
2137  *
2138  * Returns 0 on success, negative on failure
2139  **/
2140 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2141 {
2142         struct i40e_netdev_priv *np = netdev_priv(netdev);
2143         struct i40e_vsi *vsi = np->vsi;
2144
2145         netdev_info(netdev, "changing MTU from %d to %d\n",
2146                     netdev->mtu, new_mtu);
2147         netdev->mtu = new_mtu;
2148         if (netif_running(netdev))
2149                 i40e_vsi_reinit_locked(vsi);
2150         i40e_notify_client_of_l2_param_changes(vsi);
2151         return 0;
2152 }
2153
2154 /**
2155  * i40e_ioctl - Access the hwtstamp interface
2156  * @netdev: network interface device structure
2157  * @ifr: interface request data
2158  * @cmd: ioctl command
2159  **/
2160 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2161 {
2162         struct i40e_netdev_priv *np = netdev_priv(netdev);
2163         struct i40e_pf *pf = np->vsi->back;
2164
2165         switch (cmd) {
2166         case SIOCGHWTSTAMP:
2167                 return i40e_ptp_get_ts_config(pf, ifr);
2168         case SIOCSHWTSTAMP:
2169                 return i40e_ptp_set_ts_config(pf, ifr);
2170         default:
2171                 return -EOPNOTSUPP;
2172         }
2173 }
2174
2175 /**
2176  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2177  * @vsi: the vsi being adjusted
2178  **/
2179 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2180 {
2181         struct i40e_vsi_context ctxt;
2182         i40e_status ret;
2183
2184         if ((vsi->info.valid_sections &
2185              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2186             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2187                 return;  /* already enabled */
2188
2189         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2190         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2191                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2192
2193         ctxt.seid = vsi->seid;
2194         ctxt.info = vsi->info;
2195         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2196         if (ret) {
2197                 dev_info(&vsi->back->pdev->dev,
2198                          "update vlan stripping failed, err %s aq_err %s\n",
2199                          i40e_stat_str(&vsi->back->hw, ret),
2200                          i40e_aq_str(&vsi->back->hw,
2201                                      vsi->back->hw.aq.asq_last_status));
2202         }
2203 }
2204
2205 /**
2206  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2207  * @vsi: the vsi being adjusted
2208  **/
2209 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2210 {
2211         struct i40e_vsi_context ctxt;
2212         i40e_status ret;
2213
2214         if ((vsi->info.valid_sections &
2215              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2216             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2217              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2218                 return;  /* already disabled */
2219
2220         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2221         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2222                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2223
2224         ctxt.seid = vsi->seid;
2225         ctxt.info = vsi->info;
2226         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2227         if (ret) {
2228                 dev_info(&vsi->back->pdev->dev,
2229                          "update vlan stripping failed, err %s aq_err %s\n",
2230                          i40e_stat_str(&vsi->back->hw, ret),
2231                          i40e_aq_str(&vsi->back->hw,
2232                                      vsi->back->hw.aq.asq_last_status));
2233         }
2234 }
2235
2236 /**
2237  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2238  * @netdev: network interface to be adjusted
2239  * @features: netdev features to test if VLAN offload is enabled or not
2240  **/
2241 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2242 {
2243         struct i40e_netdev_priv *np = netdev_priv(netdev);
2244         struct i40e_vsi *vsi = np->vsi;
2245
2246         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2247                 i40e_vlan_stripping_enable(vsi);
2248         else
2249                 i40e_vlan_stripping_disable(vsi);
2250 }
2251
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Pairs every active MAC filter on the VSI (plus the netdev's own MAC)
 * with @vid, and on the first real tag converts any existing
 * I40E_VLAN_ANY ("-1") filters to VLAN 0 so the VSI accepts untagged
 * plus the specific tags rather than everything.  Returns 0 on success
 * or -ENOMEM if a filter could not be allocated.  The actual HW
 * programming is deferred to the service task, scheduled on exit.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *ftmp, *add_f;

	/* Locked once because all functions invoked below iterates list*/
	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* pair the netdev's own MAC address with the new VLAN */
	if (vsi->netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* pair every other MAC on the VSI with the new VLAN, skipping
	 * entries already marked for removal
	 */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not all tags along with untagged)
	 */
	if (vid > 0) {
		if (vsi->netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						    I40E_VLAN_ANY)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_REMOVE)
				continue;
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY);
			add_f = i40e_add_filter(vsi, f->macaddr, 0);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2336
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Drops the MAC/@vid pairing for every MAC on the VSI.  If no tagged
 * filters remain afterwards, the VLAN-0 filters are converted back to
 * I40E_VLAN_ANY so the VSI again accepts any tag (or none).  The HW
 * programming is deferred to the service task, scheduled on exit.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *ftmp, *add_f;
	int filter_count = 0;

	/* Locked once because all functions invoked below iterates list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* remove the MAC/@vid pairing for the netdev MAC and all others */
	if (vsi->netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid);

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->netdev) {
			/* NOTE(review): a tagged filter on the netdev MAC
			 * also matches the generic f->vlan test below and so
			 * is counted twice — confirm this is intentional
			 */
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: restore match-any-VLAN for the netdev MAC */
	if (!filter_count && vsi->netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* ...and for every other MAC on the VSI.
	 * NOTE(review): unlike i40e_vsi_add_vlan(), this loop does not skip
	 * I40E_FILTER_REMOVE entries — confirm whether it should.
	 */
	if (!filter_count) {
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2409
2410 /**
2411  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2412  * @netdev: network interface to be adjusted
2413  * @vid: vlan id to be added
2414  *
2415  * net_device_ops implementation for adding vlan ids
2416  **/
2417 #ifdef I40E_FCOE
2418 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2419                          __always_unused __be16 proto, u16 vid)
2420 #else
2421 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2422                                 __always_unused __be16 proto, u16 vid)
2423 #endif
2424 {
2425         struct i40e_netdev_priv *np = netdev_priv(netdev);
2426         struct i40e_vsi *vsi = np->vsi;
2427         int ret = 0;
2428
2429         if (vid > 4095)
2430                 return -EINVAL;
2431
2432         /* If the network stack called us with vid = 0 then
2433          * it is asking to receive priority tagged packets with
2434          * vlan id 0.  Our HW receives them by default when configured
2435          * to receive untagged packets so there is no need to add an
2436          * extra filter for vlan 0 tagged packets.
2437          */
2438         if (vid)
2439                 ret = i40e_vsi_add_vlan(vsi, vid);
2440
2441         if (!ret && (vid < VLAN_N_VID))
2442                 set_bit(vid, vsi->active_vlans);
2443
2444         return ret;
2445 }
2446
2447 /**
2448  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2449  * @netdev: network interface to be adjusted
2450  * @vid: vlan id to be removed
2451  *
2452  * net_device_ops implementation for removing vlan ids
2453  **/
2454 #ifdef I40E_FCOE
2455 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2456                           __always_unused __be16 proto, u16 vid)
2457 #else
2458 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2459                                  __always_unused __be16 proto, u16 vid)
2460 #endif
2461 {
2462         struct i40e_netdev_priv *np = netdev_priv(netdev);
2463         struct i40e_vsi *vsi = np->vsi;
2464
2465         /* return code is ignored as there is nothing a user
2466          * can do about failure to remove and a log message was
2467          * already printed from the other function
2468          */
2469         i40e_vsi_kill_vlan(vsi, vid);
2470
2471         clear_bit(vid, vsi->active_vlans);
2472
2473         return 0;
2474 }
2475
2476 /**
2477  * i40e_macaddr_init - explicitly write the mac address filters
2478  *
2479  * @vsi: pointer to the vsi
2480  * @macaddr: the MAC address
2481  *
2482  * This is needed when the macaddr has been obtained by other
2483  * means than the default, e.g., from Open Firmware or IDPROM.
2484  * Returns 0 on success, negative on failure
2485  **/
2486 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2487 {
2488         int ret;
2489         struct i40e_aqc_add_macvlan_element_data element;
2490
2491         ret = i40e_aq_mac_address_write(&vsi->back->hw,
2492                                         I40E_AQC_WRITE_TYPE_LAA_WOL,
2493                                         macaddr, NULL);
2494         if (ret) {
2495                 dev_info(&vsi->back->pdev->dev,
2496                          "Addr change for VSI failed: %d\n", ret);
2497                 return -EADDRNOTAVAIL;
2498         }
2499
2500         memset(&element, 0, sizeof(element));
2501         ether_addr_copy(element.mac_addr, macaddr);
2502         element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2503         ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2504         if (ret) {
2505                 dev_info(&vsi->back->pdev->dev,
2506                          "add filter failed err %s aq_err %s\n",
2507                          i40e_stat_str(&vsi->back->hw, ret),
2508                          i40e_aq_str(&vsi->back->hw,
2509                                      vsi->back->hw.aq.asq_last_status));
2510         }
2511         return ret;
2512 }
2513
2514 /**
2515  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2516  * @vsi: the vsi being brought back up
2517  **/
2518 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2519 {
2520         u16 vid;
2521
2522         if (!vsi->netdev)
2523                 return;
2524
2525         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2526
2527         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2528                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2529                                      vid);
2530 }
2531
2532 /**
2533  * i40e_vsi_add_pvid - Add pvid for the VSI
2534  * @vsi: the vsi being adjusted
2535  * @vid: the vlan id to set as a PVID
2536  **/
2537 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2538 {
2539         struct i40e_vsi_context ctxt;
2540         i40e_status ret;
2541
2542         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2543         vsi->info.pvid = cpu_to_le16(vid);
2544         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2545                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2546                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2547
2548         ctxt.seid = vsi->seid;
2549         ctxt.info = vsi->info;
2550         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2551         if (ret) {
2552                 dev_info(&vsi->back->pdev->dev,
2553                          "add pvid failed, err %s aq_err %s\n",
2554                          i40e_stat_str(&vsi->back->hw, ret),
2555                          i40e_aq_str(&vsi->back->hw,
2556                                      vsi->back->hw.aq.asq_last_status));
2557                 return -ENOENT;
2558         }
2559
2560         return 0;
2561 }
2562
2563 /**
2564  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2565  * @vsi: the vsi being adjusted
2566  *
2567  * Just use the vlan_rx_register() service to put it back to normal
2568  **/
2569 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2570 {
2571         i40e_vlan_stripping_disable(vsi);
2572
2573         vsi->info.pvid = 0;
2574 }
2575
2576 /**
2577  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2578  * @vsi: ptr to the VSI
2579  *
2580  * If this function returns with an error, then it's possible one or
2581  * more of the rings is populated (while the rest are not).  It is the
2582  * callers duty to clean those orphaned rings.
2583  *
2584  * Return 0 on success, negative on failure
2585  **/
2586 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2587 {
2588         int i, err = 0;
2589
2590         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2591                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2592
2593         return err;
2594 }
2595
2596 /**
2597  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2598  * @vsi: ptr to the VSI
2599  *
2600  * Free VSI's transmit software resources
2601  **/
2602 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2603 {
2604         int i;
2605
2606         if (!vsi->tx_rings)
2607                 return;
2608
2609         for (i = 0; i < vsi->num_queue_pairs; i++)
2610                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2611                         i40e_free_tx_resources(vsi->tx_rings[i]);
2612 }
2613
2614 /**
2615  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2616  * @vsi: ptr to the VSI
2617  *
2618  * If this function returns with an error, then it's possible one or
2619  * more of the rings is populated (while the rest are not).  It is the
2620  * callers duty to clean those orphaned rings.
2621  *
2622  * Return 0 on success, negative on failure
2623  **/
2624 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2625 {
2626         int i, err = 0;
2627
2628         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2629                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2630 #ifdef I40E_FCOE
2631         i40e_fcoe_setup_ddp_resources(vsi);
2632 #endif
2633         return err;
2634 }
2635
2636 /**
2637  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2638  * @vsi: ptr to the VSI
2639  *
2640  * Free all receive software resources
2641  **/
2642 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2643 {
2644         int i;
2645
2646         if (!vsi->rx_rings)
2647                 return;
2648
2649         for (i = 0; i < vsi->num_queue_pairs; i++)
2650                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2651                         i40e_free_rx_resources(vsi->rx_rings[i]);
2652 #ifdef I40E_FCOE
2653         i40e_fcoe_free_ddp_resources(vsi);
2654 #endif
2655 }
2656
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	/* rings with no interrupt vector or netdev have nothing to map */
	if (!ring->q_vector || !ring->netdev)
		return;

	/* Single TC mode enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		/* map the queue to its vector's affinity, once per ring */
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 * NOTE(review): this comment and schedule appear copy-pasted from
	 * the filter-sync paths; confirm the service task actually needs to
	 * run after an XPS-only change.
	 */
	i40e_service_event_schedule(vsi->back);
}
2690
2691 /**
2692  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2693  * @ring: The Tx ring to configure
2694  *
2695  * Configure the Tx descriptor ring in the HMC context.
2696  **/
2697 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2698 {
2699         struct i40e_vsi *vsi = ring->vsi;
2700         u16 pf_q = vsi->base_queue + ring->queue_index;
2701         struct i40e_hw *hw = &vsi->back->hw;
2702         struct i40e_hmc_obj_txq tx_ctx;
2703         i40e_status err = 0;
2704         u32 qtx_ctl = 0;
2705
2706         /* some ATR related tx ring init */
2707         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2708                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2709                 ring->atr_count = 0;
2710         } else {
2711                 ring->atr_sample_rate = 0;
2712         }
2713
2714         /* configure XPS */
2715         i40e_config_xps_tx_ring(ring);
2716
2717         /* clear the context structure first */
2718         memset(&tx_ctx, 0, sizeof(tx_ctx));
2719
2720         tx_ctx.new_context = 1;
2721         tx_ctx.base = (ring->dma / 128);
2722         tx_ctx.qlen = ring->count;
2723         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2724                                                I40E_FLAG_FD_ATR_ENABLED));
2725 #ifdef I40E_FCOE
2726         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2727 #endif
2728         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2729         /* FDIR VSI tx ring can still use RS bit and writebacks */
2730         if (vsi->type != I40E_VSI_FDIR)
2731                 tx_ctx.head_wb_ena = 1;
2732         tx_ctx.head_wb_addr = ring->dma +
2733                               (ring->count * sizeof(struct i40e_tx_desc));
2734
2735         /* As part of VSI creation/update, FW allocates certain
2736          * Tx arbitration queue sets for each TC enabled for
2737          * the VSI. The FW returns the handles to these queue
2738          * sets as part of the response buffer to Add VSI,
2739          * Update VSI, etc. AQ commands. It is expected that
2740          * these queue set handles be associated with the Tx
2741          * queues by the driver as part of the TX queue context
2742          * initialization. This has to be done regardless of
2743          * DCB as by default everything is mapped to TC0.
2744          */
2745         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2746         tx_ctx.rdylist_act = 0;
2747
2748         /* clear the context in the HMC */
2749         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2750         if (err) {
2751                 dev_info(&vsi->back->pdev->dev,
2752                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2753                          ring->queue_index, pf_q, err);
2754                 return -ENOMEM;
2755         }
2756
2757         /* set the context in the HMC */
2758         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2759         if (err) {
2760                 dev_info(&vsi->back->pdev->dev,
2761                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2762                          ring->queue_index, pf_q, err);
2763                 return -ENOMEM;
2764         }
2765
2766         /* Now associate this queue with this PCI function */
2767         if (vsi->type == I40E_VSI_VMDQ2) {
2768                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2769                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2770                            I40E_QTX_CTL_VFVM_INDX_MASK;
2771         } else {
2772                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2773         }
2774
2775         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2776                     I40E_QTX_CTL_PF_INDX_MASK);
2777         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2778         i40e_flush(hw);
2779
2780         /* cache tail off for easier writes later */
2781         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2782
2783         return 0;
2784 }
2785
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context: buffer size,
 * queue base/length, descriptor format, and stripping options, then
 * prime the ring with receive buffers.
 *
 * Returns 0 on success, -ENOMEM if the HMC context could not be
 * cleared or written.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* the VSI decided the buffer size; the ring just adopts it */
	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	/* max frame is capped by buffer size times the HW chain length */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* hand all currently-unused descriptors fresh buffers */
	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
2862
2863 /**
2864  * i40e_vsi_configure_tx - Configure the VSI for Tx
2865  * @vsi: VSI structure describing this set of rings and resources
2866  *
2867  * Configure the Tx VSI for operation.
2868  **/
2869 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2870 {
2871         int err = 0;
2872         u16 i;
2873
2874         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2875                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2876
2877         return err;
2878 }
2879
2880 /**
2881  * i40e_vsi_configure_rx - Configure the VSI for Rx
2882  * @vsi: the VSI being configured
2883  *
2884  * Configure the Rx VSI for operation.
2885  **/
2886 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2887 {
2888         int err = 0;
2889         u16 i;
2890
2891         if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2892                 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2893                                + ETH_FCS_LEN + VLAN_HLEN;
2894         else
2895                 vsi->max_frame = I40E_RXBUFFER_2048;
2896
2897         vsi->rx_buf_len = I40E_RXBUFFER_2048;
2898
2899 #ifdef I40E_FCOE
2900         /* setup rx buffer for FCoE */
2901         if ((vsi->type == I40E_VSI_FCOE) &&
2902             (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2903                 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2904                 vsi->max_frame = I40E_RXBUFFER_3072;
2905         }
2906
2907 #endif /* I40E_FCOE */
2908         /* round up for the chip's needs */
2909         vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2910                                 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
2911
2912         /* set up individual rings */
2913         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2914                 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2915
2916         return err;
2917 }
2918
2919 /**
2920  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2921  * @vsi: ptr to the VSI
2922  **/
2923 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2924 {
2925         struct i40e_ring *tx_ring, *rx_ring;
2926         u16 qoffset, qcount;
2927         int i, n;
2928
2929         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2930                 /* Reset the TC information */
2931                 for (i = 0; i < vsi->num_queue_pairs; i++) {
2932                         rx_ring = vsi->rx_rings[i];
2933                         tx_ring = vsi->tx_rings[i];
2934                         rx_ring->dcb_tc = 0;
2935                         tx_ring->dcb_tc = 0;
2936                 }
2937         }
2938
2939         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2940                 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2941                         continue;
2942
2943                 qoffset = vsi->tc_config.tc_info[n].qoffset;
2944                 qcount = vsi->tc_config.tc_info[n].qcount;
2945                 for (i = qoffset; i < (qoffset + qcount); i++) {
2946                         rx_ring = vsi->rx_rings[i];
2947                         tx_ring = vsi->tx_rings[i];
2948                         rx_ring->dcb_tc = n;
2949                         tx_ring->dcb_tc = n;
2950                 }
2951         }
2952 }
2953
2954 /**
2955  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2956  * @vsi: ptr to the VSI
2957  **/
2958 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2959 {
2960         struct i40e_pf *pf = vsi->back;
2961         int err;
2962
2963         if (vsi->netdev)
2964                 i40e_set_rx_mode(vsi->netdev);
2965
2966         if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
2967                 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
2968                 if (err) {
2969                         dev_warn(&pf->pdev->dev,
2970                                  "could not set up macaddr; err %d\n", err);
2971                 }
2972         }
2973 }
2974
2975 /**
2976  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2977  * @vsi: Pointer to the targeted VSI
2978  *
2979  * This function replays the hlist on the hw where all the SB Flow Director
2980  * filters were saved.
2981  **/
2982 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2983 {
2984         struct i40e_fdir_filter *filter;
2985         struct i40e_pf *pf = vsi->back;
2986         struct hlist_node *node;
2987
2988         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2989                 return;
2990
2991         hlist_for_each_entry_safe(filter, node,
2992                                   &pf->fdir_filter_list, fdir_node) {
2993                 i40e_add_del_fdir(vsi, filter, true);
2994         }
2995 }
2996
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Apply rx_mode, VLANs, and DCB ring tags, then program the Tx and Rx
 * rings.  Returns 0 on success or the first configuration error.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	err = i40e_vsi_configure_tx(vsi);
	if (err)
		return err;

	return i40e_vsi_configure_rx(vsi);
}
3014
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Program per-vector ITR values and build, for each vector, the hardware
 * linked list of queue-pair interrupt causes (Rx cause chains to its Tx
 * cause, which chains to the next pair's Rx cause, and so on).
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		/* seed the vector's ITR state from ring 0 of this pairing */
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			/* Rx cause: next element is the same qp's Tx queue */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next element is the next qp's Rx queue */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
3081
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Clear and then enable the "other" (non-queue) interrupt causes on ICR0:
 * errors, resets, AdminQ, VFLR, and optionally iWARP and PTP events.
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* feature-conditional causes */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
3119
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 *
 * In MSI/legacy mode only vector 0 and queue pair 0 exist: set ITR for
 * that vector, enable the misc (non-queue) causes, and link queue pair 0
 * into the single interrupt cause list.
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	/* NOTE(review): the RQCTL value below uses the TQCTL NEXTQ_TYPE
	 * shift; presumably the RQCTL and TQCTL field layouts match —
	 * confirm against the register definitions.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx cause terminates the list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
3159
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 *
 * Writes DYN_CTL0 without the INTENA bit, leaving only the ITR index,
 * which masks the "other cause" (ICR0) interrupt.
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	/* NOTE(review): uses the DYN_CTLN shift on the DYN_CTL0 register;
	 * presumably both registers share the ITR_INDX field layout — confirm.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
3172
3173 /**
3174  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3175  * @pf: board private structure
3176  * @clearpba: true when all pending interrupt events should be cleared
3177  **/
3178 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3179 {
3180         struct i40e_hw *hw = &pf->hw;
3181         u32 val;
3182
3183         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3184               (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3185               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3186
3187         wr32(hw, I40E_PFINT_DYN_CTL0, val);
3188         i40e_flush(hw);
3189 }
3190
3191 /**
3192  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3193  * @irq: interrupt number
3194  * @data: pointer to a q_vector
3195  **/
3196 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3197 {
3198         struct i40e_q_vector *q_vector = data;
3199
3200         if (!q_vector->tx.ring && !q_vector->rx.ring)
3201                 return IRQ_HANDLED;
3202
3203         napi_schedule_irqoff(&q_vector->napi);
3204
3205         return IRQ_HANDLED;
3206 }
3207
/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	/* cache the new mask so it can be re-applied as the affinity hint */
	q_vector->affinity_mask = *mask;
}
3224
/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
/* intentionally empty: no per-vector state needs releasing here */
static void i40e_irq_affinity_release(struct kref *ref) {}
3234
3235 /**
3236  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3237  * @vsi: the VSI being configured
3238  * @basename: name for the vector
3239  *
3240  * Allocates MSI-X vectors and requests interrupts from the kernel.
3241  **/
3242 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3243 {
3244         int q_vectors = vsi->num_q_vectors;
3245         struct i40e_pf *pf = vsi->back;
3246         int base = vsi->base_vector;
3247         int rx_int_idx = 0;
3248         int tx_int_idx = 0;
3249         int vector, err;
3250         int irq_num;
3251
3252         for (vector = 0; vector < q_vectors; vector++) {
3253                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3254
3255                 irq_num = pf->msix_entries[base + vector].vector;
3256
3257                 if (q_vector->tx.ring && q_vector->rx.ring) {
3258                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3259                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3260                         tx_int_idx++;
3261                 } else if (q_vector->rx.ring) {
3262                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3263                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3264                 } else if (q_vector->tx.ring) {
3265                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3266                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3267                 } else {
3268                         /* skip this unused q_vector */
3269                         continue;
3270                 }
3271                 err = request_irq(irq_num,
3272                                   vsi->irq_handler,
3273                                   0,
3274                                   q_vector->name,
3275                                   q_vector);
3276                 if (err) {
3277                         dev_info(&pf->pdev->dev,
3278                                  "MSIX request_irq failed, error: %d\n", err);
3279                         goto free_queue_irqs;
3280                 }
3281
3282                 /* register for affinity change notifications */
3283                 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3284                 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3285                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3286                 /* assign the mask for this irq */
3287                 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
3288         }
3289
3290         vsi->irqs_ready = true;
3291         return 0;
3292
3293 free_queue_irqs:
3294         while (vector) {
3295                 vector--;
3296                 irq_num = pf->msix_entries[base + vector].vector;
3297                 irq_set_affinity_notifier(irq_num, NULL);
3298                 irq_set_affinity_hint(irq_num, NULL);
3299                 free_irq(irq_num, &vsi->q_vectors[vector]);
3300         }
3301         return err;
3302 }
3303
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Zeroes the queue interrupt cause registers for every queue pair, masks
 * the VSI's vectors in hardware, and waits for in-flight handlers to finish.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable each queue pair's Tx/Rx interrupt causes */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* DYN_CTLN register indexing is offset by 1 from the vector
		 * number (vector 0 is ICR0/DYN_CTL0), hence the "i - 1"
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		/* wait for any handler already running on another CPU */
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3336
3337 /**
3338  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3339  * @vsi: the VSI being configured
3340  **/
3341 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3342 {
3343         struct i40e_pf *pf = vsi->back;
3344         int i;
3345
3346         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3347                 for (i = 0; i < vsi->num_q_vectors; i++)
3348                         i40e_irq_dynamic_enable(vsi, i);
3349         } else {
3350                 i40e_irq_dynamic_enable_icr0(pf, true);
3351         }
3352
3353         i40e_flush(&pf->hw);
3354         return 0;
3355 }
3356
3357 /**
3358  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3359  * @pf: board private structure
3360  **/
3361 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3362 {
3363         /* Disable ICR 0 */
3364         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3365         i40e_flush(&pf->hw);
3366 }
3367
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 *
 * Note: @data is registered as the PF pointer in i40e_vsi_request_irq(),
 * despite the kernel-doc above saying q_vector.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	/* reading ICR0 also clears the latched causes in hardware */
	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* iWARP critical errors are acknowledged but otherwise unhandled */
	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	/* for each cause below: mask it until the service task handles it */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		/* record which flavor of global reset was signalled */
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}

	return ret;
}
3498
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the FDIR sideband Tx ring reclaiming completed filter-program
 * descriptors (each entry is a filter desc followed by a data desc).
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* bias the index negative so wrap-around is detected by i hitting 0 */
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		/* sideband filters carry a kmalloc'd raw buffer to free */
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the negative bias to recover the real ring index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
3583
3584 /**
3585  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3586  * @irq: interrupt number
3587  * @data: pointer to a q_vector
3588  **/
3589 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3590 {
3591         struct i40e_q_vector *q_vector = data;
3592         struct i40e_vsi *vsi;
3593
3594         if (!q_vector->tx.ring)
3595                 return IRQ_HANDLED;
3596
3597         vsi = q_vector->tx.ring->vsi;
3598         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3599
3600         return IRQ_HANDLED;
3601 }
3602
3603 /**
3604  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3605  * @vsi: the VSI being configured
3606  * @v_idx: vector index
3607  * @qp_idx: queue pair index
3608  **/
3609 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3610 {
3611         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3612         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3613         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3614
3615         tx_ring->q_vector = q_vector;
3616         tx_ring->next = q_vector->tx.ring;
3617         q_vector->tx.ring = tx_ring;
3618         q_vector->tx.count++;
3619
3620         rx_ring->q_vector = q_vector;
3621         rx_ring->next = q_vector->rx.ring;
3622         q_vector->rx.ring = rx_ring;
3623         q_vector->rx.count++;
3624 }
3625
/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		/* ceil-divide what's left over the vectors still unassigned,
		 * so the pairs spread as evenly as possible
		 */
		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		/* reset any stale mapping before rebuilding the ring lists */
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
3669
3670 /**
3671  * i40e_vsi_request_irq - Request IRQ from the OS
3672  * @vsi: the VSI being configured
3673  * @basename: name for the vector
3674  **/
3675 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3676 {
3677         struct i40e_pf *pf = vsi->back;
3678         int err;
3679
3680         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3681                 err = i40e_vsi_request_irq_msix(vsi, basename);
3682         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3683                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3684                                   pf->int_name, pf);
3685         else
3686                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3687                                   pf->int_name, pf);
3688
3689         if (err)
3690                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3691
3692         return err;
3693 }
3694
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() casts its dev_id to struct i40e_pf *, and
		 * i40e_vsi_request_irq() registers the pf as that cookie;
		 * pass the pf here (passing the netdev would be
		 * reinterpreted as a pf pointer and dereferenced as garbage).
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
}
#endif
3726
3727 /**
3728  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3729  * @pf: the PF being configured
3730  * @pf_q: the PF queue
3731  * @enable: enable or disable state of the queue
3732  *
3733  * This routine will wait for the given Tx queue of the PF to reach the
3734  * enabled or disabled state.
3735  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3736  * multiple retries; else will return 0 in case of success.
3737  **/
3738 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3739 {
3740         int i;
3741         u32 tx_reg;
3742
3743         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3744                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3745                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3746                         break;
3747
3748                 usleep_range(10, 20);
3749         }
3750         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3751                 return -ETIMEDOUT;
3752
3753         return 0;
3754 }
3755
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requests the Tx queue be enabled or
 * disabled, and waits for the hardware to report the new state.
 * Returns 0 on success, or the error (-ETIMEDOUT) from the first queue
 * that fails to reach the requested state.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		/* wait for any in-flight request to settle: QENA_REQ must
		 * equal QENA_STAT before we issue a new request
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset head pointer before (re)enabling the queue */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable while port Tx is
		 * suspended (e.g. during a DCB reconfiguration)
		 */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* A0 (revision 0) silicon needs extra settle time after queue
	 * state changes
	 */
	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
3814
3815 /**
3816  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3817  * @pf: the PF being configured
3818  * @pf_q: the PF queue
3819  * @enable: enable or disable state of the queue
3820  *
3821  * This routine will wait for the given Rx queue of the PF to reach the
3822  * enabled or disabled state.
3823  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3824  * multiple retries; else will return 0 in case of success.
3825  **/
3826 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3827 {
3828         int i;
3829         u32 rx_reg;
3830
3831         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3832                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3833                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3834                         break;
3835
3836                 usleep_range(10, 20);
3837         }
3838         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3839                 return -ETIMEDOUT;
3840
3841         return 0;
3842 }
3843
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requests the Rx queue be enabled or
 * disabled, and waits for the hardware to report the new state.
 * Returns 0 on success, or the error (-ETIMEDOUT) from the first queue
 * that fails to reach the requested state.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any in-flight request to settle: QENA_REQ must
		 * equal QENA_STAT before we issue a new request
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
		/* No waiting for the Rx queue to disable while port Tx is
		 * suspended
		 */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
3892
3893 /**
3894  * i40e_vsi_control_rings - Start or stop a VSI's rings
3895  * @vsi: the VSI being configured
3896  * @enable: start or stop the rings
3897  **/
3898 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3899 {
3900         int ret = 0;
3901
3902         /* do rx first for enable and last for disable */
3903         if (request) {
3904                 ret = i40e_vsi_control_rx(vsi, request);
3905                 if (ret)
3906                         return ret;
3907                 ret = i40e_vsi_control_tx(vsi, request);
3908         } else {
3909                 /* Ignore return value, we need to shutdown whatever we can */
3910                 i40e_vsi_control_tx(vsi, request);
3911                 i40e_vsi_control_rx(vsi, request);
3912         }
3913
3914         return ret;
3915 }
3916
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the IRQs requested by i40e_vsi_request_irq() and tears down
 * the hardware interrupt-cause linked lists for the VSI's queues.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		/* nothing to free if the IRQs were never requested */
		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the queue list, detaching each queue's Rx
			 * and Tx interrupt causes from this vector
			 */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* remember the next queue before unlinking */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* legacy INTx / MSI: one vector registered against the PF */
		free_irq(pf->pdev->irq, pf);

		/* terminate the single vector-0 queue list */
		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
4036
4037 /**
4038  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4039  * @vsi: the VSI being configured
4040  * @v_idx: Index of vector to be freed
4041  *
4042  * This function frees the memory allocated to the q_vector.  In addition if
4043  * NAPI is enabled it will delete any references to the NAPI struct prior
4044  * to freeing the q_vector.
4045  **/
4046 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4047 {
4048         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4049         struct i40e_ring *ring;
4050
4051         if (!q_vector)
4052                 return;
4053
4054         /* disassociate q_vector from rings */
4055         i40e_for_each_ring(ring, q_vector->tx)
4056                 ring->q_vector = NULL;
4057
4058         i40e_for_each_ring(ring, q_vector->rx)
4059                 ring->q_vector = NULL;
4060
4061         /* only VSI w/ an associated netdev is set up w/ NAPI */
4062         if (vsi->netdev)
4063                 netif_napi_del(&q_vector->napi);
4064
4065         vsi->q_vectors[v_idx] = NULL;
4066
4067         kfree_rcu(q_vector, rcu);
4068 }
4069
4070 /**
4071  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4072  * @vsi: the VSI being un-configured
4073  *
4074  * This frees the memory allocated to the q_vectors and
4075  * deletes references to the NAPI struct.
4076  **/
4077 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4078 {
4079         int v_idx;
4080
4081         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4082                 i40e_free_q_vector(vsi, v_idx);
4083 }
4084
4085 /**
4086  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4087  * @pf: board private structure
4088  **/
4089 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4090 {
4091         /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4092         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4093                 pci_disable_msix(pf->pdev);
4094                 kfree(pf->msix_entries);
4095                 pf->msix_entries = NULL;
4096                 kfree(pf->irq_pile);
4097                 pf->irq_pile = NULL;
4098         } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4099                 pci_disable_msi(pf->pdev);
4100         }
4101         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4102 }
4103
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	/* quiesce the misc/admin-queue vector before freeing it */
	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* return the iWARP vectors to the IRQ tracking pile */
	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	/* release all remaining tracked vectors, then each VSI's q_vectors,
	 * and finally the OS-level MSI-X/MSI capability
	 */
	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
4130
4131 /**
4132  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4133  * @vsi: the VSI being configured
4134  **/
4135 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4136 {
4137         int q_idx;
4138
4139         if (!vsi->netdev)
4140                 return;
4141
4142         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4143                 napi_enable(&vsi->q_vectors[q_idx]->napi);
4144 }
4145
4146 /**
4147  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4148  * @vsi: the VSI being configured
4149  **/
4150 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4151 {
4152         int q_idx;
4153
4154         if (!vsi->netdev)
4155                 return;
4156
4157         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4158                 napi_disable(&vsi->q_vectors[q_idx]->napi);
4159 }
4160
4161 /**
4162  * i40e_vsi_close - Shut down a VSI
4163  * @vsi: the vsi to be quelled
4164  **/
4165 static void i40e_vsi_close(struct i40e_vsi *vsi)
4166 {
4167         bool reset = false;
4168
4169         if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4170                 i40e_down(vsi);
4171         i40e_vsi_free_irq(vsi);
4172         i40e_vsi_free_tx_resources(vsi);
4173         i40e_vsi_free_rx_resources(vsi);
4174         vsi->current_netdev_flags = 0;
4175         if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4176                 reset = true;
4177         i40e_notify_client_of_netdev_close(vsi, reset);
4178 }
4179
4180 /**
4181  * i40e_quiesce_vsi - Pause a given VSI
4182  * @vsi: the VSI being paused
4183  **/
4184 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4185 {
4186         if (test_bit(__I40E_DOWN, &vsi->state))
4187                 return;
4188
4189         /* No need to disable FCoE VSI when Tx suspended */
4190         if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4191             vsi->type == I40E_VSI_FCOE) {
4192                 dev_dbg(&vsi->back->pdev->dev,
4193                          "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4194                 return;
4195         }
4196
4197         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4198         if (vsi->netdev && netif_running(vsi->netdev))
4199                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4200         else
4201                 i40e_vsi_close(vsi);
4202 }
4203
4204 /**
4205  * i40e_unquiesce_vsi - Resume a given VSI
4206  * @vsi: the VSI being resumed
4207  **/
4208 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4209 {
4210         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4211                 return;
4212
4213         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4214         if (vsi->netdev && netif_running(vsi->netdev))
4215                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4216         else
4217                 i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4218 }
4219
4220 /**
4221  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4222  * @pf: the PF
4223  **/
4224 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4225 {
4226         int v;
4227
4228         for (v = 0; v < pf->num_alloc_vsi; v++) {
4229                 if (pf->vsi[v])
4230                         i40e_quiesce_vsi(pf->vsi[v]);
4231         }
4232 }
4233
4234 /**
4235  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4236  * @pf: the PF
4237  **/
4238 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4239 {
4240         int v;
4241
4242         for (v = 0; v < pf->num_alloc_vsi; v++) {
4243                 if (pf->vsi[v])
4244                         i40e_unquiesce_vsi(pf->vsi[v]);
4245         }
4246 }
4247
4248 #ifdef CONFIG_I40E_DCB
4249 /**
4250  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4251  * @vsi: the VSI being configured
4252  *
4253  * This function waits for the given VSI's queues to be disabled.
4254  **/
4255 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4256 {
4257         struct i40e_pf *pf = vsi->back;
4258         int i, pf_q, ret;
4259
4260         pf_q = vsi->base_queue;
4261         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4262                 /* Check and wait for the disable status of the queue */
4263                 ret = i40e_pf_txq_wait(pf, pf_q, false);
4264                 if (ret) {
4265                         dev_info(&pf->pdev->dev,
4266                                  "VSI seid %d Tx ring %d disable timeout\n",
4267                                  vsi->seid, pf_q);
4268                         return ret;
4269                 }
4270         }
4271
4272         pf_q = vsi->base_queue;
4273         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4274                 /* Check and wait for the disable status of the queue */
4275                 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4276                 if (ret) {
4277                         dev_info(&pf->pdev->dev,
4278                                  "VSI seid %d Rx ring %d disable timeout\n",
4279                                  vsi->seid, pf_q);
4280                         return ret;
4281                 }
4282         }
4283
4284         return 0;
4285 }
4286
4287 /**
4288  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4289  * @pf: the PF
4290  *
4291  * This function waits for the queues to be in disabled state for all the
4292  * VSIs that are managed by this PF.
4293  **/
4294 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4295 {
4296         int v, ret = 0;
4297
4298         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4299                 /* No need to wait for FCoE VSI queues */
4300                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4301                         ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4302                         if (ret)
4303                                 break;
4304                 }
4305         }
4306
4307         return ret;
4308 }
4309
4310 #endif
4311
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks specified queue for given VSI. Detects hung condition.
 * Sets hung bit since it is two step process. Before next run of service task
 * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
 * hung condition remain unchanged and during subsequent run, this function
 * issues SW interrupt to recover from hung condition.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf  *pf;
	u32 head, val, tx_pending_hw;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	/* no initialized ring maps to this queue index */
	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	head = i40e_get_head(tx_ring);

	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

	/* HW is done executing descriptors, updated HEAD write back,
	 * but SW hasn't processed those descriptors. If interrupt is
	 * not generated from this point ON, it could result into
	 * dev_watchdog detecting timeout on those netdev_queue,
	 * hence proactively trigger SW interrupt.
	 */
	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		/* NAPI Poll didn't run and clear since it was set */
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected)) {
			/* second consecutive detection: log state and
			 * force a descriptor writeback to recover
			 */
			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
				    vsi->seid, q_idx, tx_pending_hw,
				    tx_ring->next_to_clean, head,
				    tx_ring->next_to_use,
				    readl(tx_ring->tail));
			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
				    vsi->seid, q_idx, val);
			i40e_force_wb(vsi, tx_ring->q_vector);
		} else {
			/* First Chance - detected possible hung */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
		}
	}

	/* This is the case where we have interrupts missing,
	 * so the tx_pending in HW will most likely be 0, but we
	 * will have tx_pending in SW since the WB happened but the
	 * interrupt got lost.
	 */
	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		if (napi_reschedule(&tx_ring->q_vector->napi))
			tx_ring->tx_stats.tx_lost_interrupt++;
	}
}
4393
4394 /**
4395  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4396  * @pf:  pointer to PF struct
4397  *
4398  * LAN VSI has netdev and netdev has TX queues. This function is to check
4399  * each of those TX queues if they are hung, trigger recovery by issuing
4400  * SW interrupt.
4401  **/
4402 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4403 {
4404         struct net_device *netdev;
4405         struct i40e_vsi *vsi;
4406         int i;
4407
4408         /* Only for LAN VSI */
4409         vsi = pf->vsi[pf->lan_vsi];
4410
4411         if (!vsi)
4412                 return;
4413
4414         /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4415         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4416             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4417                 return;
4418
4419         /* Make sure type is MAIN VSI */
4420         if (vsi->type != I40E_VSI_MAIN)
4421                 return;
4422
4423         netdev = vsi->netdev;
4424         if (!netdev)
4425                 return;
4426
4427         /* Bail out if netif_carrier is not OK */
4428         if (!netif_carrier_ok(netdev))
4429                 return;
4430
4431         /* Go thru' TX queues for netdev */
4432         for (i = 0; i < netdev->num_tx_queues; i++) {
4433                 struct netdev_queue *q;
4434
4435                 q = netdev_get_tx_queue(netdev, i);
4436                 if (q)
4437                         i40e_detect_recover_hung_queue(i, vsi);
4438         }
4439 }
4440
4441 /**
4442  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4443  * @pf: pointer to PF
4444  *
4445  * Get TC map for ISCSI PF type that will include iSCSI TC
4446  * and LAN TC.
4447  **/
4448 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4449 {
4450         struct i40e_dcb_app_priority_table app;
4451         struct i40e_hw *hw = &pf->hw;
4452         u8 enabled_tc = 1; /* TC0 is always enabled */
4453         u8 tc, i;
4454         /* Get the iSCSI APP TLV */
4455         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4456
4457         for (i = 0; i < dcbcfg->numapps; i++) {
4458                 app = dcbcfg->app[i];
4459                 if (app.selector == I40E_APP_SEL_TCPIP &&
4460                     app.protocolid == I40E_APP_PROTOID_ISCSI) {
4461                         tc = dcbcfg->etscfg.prioritytable[app.priority];
4462                         enabled_tc |= BIT(tc);
4463                         break;
4464                 }
4465         }
4466
4467         return enabled_tc;
4468 }
4469
4470 /**
4471  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4472  * @dcbcfg: the corresponding DCBx configuration structure
4473  *
4474  * Return the number of TCs from given DCBx configuration
4475  **/
4476 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4477 {
4478         int i, tc_unused = 0;
4479         u8 num_tc = 0;
4480         u8 ret = 0;
4481
4482         /* Scan the ETS Config Priority Table to find
4483          * traffic class enabled for a given priority
4484          * and create a bitmask of enabled TCs
4485          */
4486         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4487                 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4488
4489         /* Now scan the bitmask to check for
4490          * contiguous TCs starting with TC0
4491          */
4492         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4493                 if (num_tc & BIT(i)) {
4494                         if (!tc_unused) {
4495                                 ret++;
4496                         } else {
4497                                 pr_err("Non-contiguous TC - Disabling DCB\n");
4498                                 return 1;
4499                         }
4500                 } else {
4501                         tc_unused = 1;
4502                 }
4503         }
4504
4505         /* There is always at least TC0 */
4506         if (!ret)
4507                 ret = 1;
4508
4509         return ret;
4510 }
4511
4512 /**
4513  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4514  * @dcbcfg: the corresponding DCBx configuration structure
4515  *
4516  * Query the current DCB configuration and return the number of
4517  * traffic classes enabled from the given DCBX config
4518  **/
4519 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4520 {
4521         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4522         u8 enabled_tc = 1;
4523         u8 i;
4524
4525         for (i = 0; i < num_tc; i++)
4526                 enabled_tc |= BIT(i);
4527
4528         return enabled_tc;
4529 }
4530
4531 /**
4532  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4533  * @pf: PF being queried
4534  *
4535  * Return number of traffic classes enabled for the given PF
4536  **/
4537 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4538 {
4539         struct i40e_hw *hw = &pf->hw;
4540         u8 i, enabled_tc = 1;
4541         u8 num_tc = 0;
4542         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4543
4544         /* If DCB is not enabled then always in single TC */
4545         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4546                 return 1;
4547
4548         /* SFP mode will be enabled for all TCs on port */
4549         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4550                 return i40e_dcb_get_num_tc(dcbcfg);
4551
4552         /* MFP mode return count of enabled TCs for this PF */
4553         if (pf->hw.func_caps.iscsi)
4554                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4555         else
4556                 return 1; /* Only TC0 */
4557
4558         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4559                 if (enabled_tc & BIT(i))
4560                         num_tc++;
4561         }
4562         return num_tc;
4563 }
4564
4565 /**
4566  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4567  * @pf: PF being queried
4568  *
4569  * Return a bitmap for enabled traffic classes for this PF.
4570  **/
4571 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4572 {
4573         /* If DCB is not enabled for this PF then just return default TC */
4574         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4575                 return I40E_DEFAULT_TRAFFIC_CLASS;
4576
4577         /* SFP mode we want PF to be enabled for all TCs */
4578         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4579                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4580
4581         /* MFP enabled and iSCSI PF type */
4582         if (pf->hw.func_caps.iscsi)
4583                 return i40e_get_iscsi_tc_map(pf);
4584         else
4585                 return I40E_DEFAULT_TRAFFIC_CLASS;
4586 }
4587
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries firmware for the VSI-level and per-TC bandwidth configuration
 * and caches the results in the VSI's bw_* fields.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Both queries should agree on which TCs are valid; log a mismatch
	 * but keep going with what firmware reported.
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* tc_bw_max comes back as two LE 16-bit words; fold into one u32 */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4647
4648 /**
4649  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4650  * @vsi: the VSI being configured
4651  * @enabled_tc: TC bitmap
4652  * @bw_credits: BW shared credits per TC
4653  *
4654  * Returns 0 on success, negative value on failure
4655  **/
4656 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4657                                        u8 *bw_share)
4658 {
4659         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4660         i40e_status ret;
4661         int i;
4662
4663         bw_data.tc_valid_bits = enabled_tc;
4664         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4665                 bw_data.tc_bw_credits[i] = bw_share[i];
4666
4667         ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4668                                        NULL);
4669         if (ret) {
4670                 dev_info(&vsi->back->pdev->dev,
4671                          "AQ command Config VSI BW allocation per TC failed = %d\n",
4672                          vsi->back->hw.aq.asq_last_status);
4673                 return -EINVAL;
4674         }
4675
4676         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4677                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4678
4679         return 0;
4680 }
4681
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Propagates the VSI's TC configuration (TC count, per-TC queue ranges,
 * and user-priority to TC mapping) to the netdev layer.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* VSIs without a netdev (e.g. FDIR) have nothing to configure */
	if (!netdev)
		return;

	/* An empty TC map means tear down any existing netdev TC state */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
4734
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 *
 * Only the mapping-related sections of the returned context are copied
 * back into the cached VSI info.
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
4753
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quisced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* iWARP requires the TCP queueing option enabled in the VSI */
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4836
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	/* A refresh failure here is logged but does not undo the BW config */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
4886
4887 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesce all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4951
4952 /**
4953  * i40e_resume_port_tx - Resume port Tx
4954  * @pf: PF struct
4955  *
4956  * Resume a port's Tx and issue a PF reset in case of failure to
4957  * resume.
4958  **/
4959 static int i40e_resume_port_tx(struct i40e_pf *pf)
4960 {
4961         struct i40e_hw *hw = &pf->hw;
4962         int ret;
4963
4964         ret = i40e_aq_resume_port_tx(hw, NULL);
4965         if (ret) {
4966                 dev_info(&pf->pdev->dev,
4967                          "Resume Port Tx failed, err %s aq_err %s\n",
4968                           i40e_stat_str(&pf->hw, ret),
4969                           i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4970                 /* Schedule PF reset to recover */
4971                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4972                 i40e_service_event_schedule(pf);
4973         }
4974
4975         return ret;
4976 }
4977
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		/* i40e_init_dcb() failed; report but leave flags untouched */
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
5032 #endif /* CONFIG_I40E_DCB */
5033 #define SPEED_SIZE 14
5034 #define FC_SIZE 8
5035 /**
5036  * i40e_print_link_message - print link up or down
5037  * @vsi: the VSI for which link needs a message
5038  */
5039 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5040 {
5041         char *speed = "Unknown";
5042         char *fc = "Unknown";
5043
5044         if (vsi->current_isup == isup)
5045                 return;
5046         vsi->current_isup = isup;
5047         if (!isup) {
5048                 netdev_info(vsi->netdev, "NIC Link is Down\n");
5049                 return;
5050         }
5051
5052         /* Warn user if link speed on NPAR enabled partition is not at
5053          * least 10GB
5054          */
5055         if (vsi->back->hw.func_caps.npar_enable &&
5056             (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5057              vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5058                 netdev_warn(vsi->netdev,
5059                             "The partition detected link speed that is less than 10Gbps\n");
5060
5061         switch (vsi->back->hw.phy.link_info.link_speed) {
5062         case I40E_LINK_SPEED_40GB:
5063                 speed = "40 G";
5064                 break;
5065         case I40E_LINK_SPEED_20GB:
5066                 speed = "20 G";
5067                 break;
5068         case I40E_LINK_SPEED_10GB:
5069                 speed = "10 G";
5070                 break;
5071         case I40E_LINK_SPEED_1GB:
5072                 speed = "1000 M";
5073                 break;
5074         case I40E_LINK_SPEED_100MB:
5075                 speed = "100 M";
5076                 break;
5077         default:
5078                 break;
5079         }
5080
5081         switch (vsi->back->hw.fc.current_mode) {
5082         case I40E_FC_FULL:
5083                 fc = "RX/TX";
5084                 break;
5085         case I40E_FC_TX_PAUSE:
5086                 fc = "TX";
5087                 break;
5088         case I40E_FC_RX_PAUSE:
5089                 fc = "RX";
5090                 break;
5091         default:
5092                 fc = "None";
5093                 break;
5094         }
5095
5096         netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5097                     speed, fc);
5098 }
5099
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Configures interrupts, starts the rings, enables NAPI and IRQs, and
 * announces link state. For the FDIR VSI it also replays the sideband
 * flow director filters.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	/* Announce link state and start the Tx queues if link is up */
	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		/* ATR is forced off while sideband TCP/IPv4 rules exist */
		if (pf->fd_tcp_rule > 0) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
5160
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	/* Must not run in interrupt context: the loop below sleeps */
	WARN_ON(in_interrupt());
	/* Spin (sleeping) until we own the PF's config-busy bit */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
5180
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
5195
5196 /**
5197  * i40e_down - Shutdown the connection processing
5198  * @vsi: the VSI being stopped
5199  **/
5200 void i40e_down(struct i40e_vsi *vsi)
5201 {
5202         int i;
5203
5204         /* It is assumed that the caller of this function
5205          * sets the vsi->state __I40E_DOWN bit.
5206          */
5207         if (vsi->netdev) {
5208                 netif_carrier_off(vsi->netdev);
5209                 netif_tx_disable(vsi->netdev);
5210         }
5211         i40e_vsi_disable_irq(vsi);
5212         i40e_vsi_control_rings(vsi, false);
5213         i40e_napi_disable_all(vsi);
5214
5215         for (i = 0; i < vsi->num_queue_pairs; i++) {
5216                 i40e_clean_tx_ring(vsi->tx_rings[i]);
5217                 i40e_clean_rx_ring(vsi->rx_rings[i]);
5218         }
5219
5220         i40e_notify_client_of_netdev_close(vsi, false);
5221
5222 }
5223
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	/* NOTE(review): on config failure above the VSI is left quiesced —
	 * presumably the subsequent reset/recovery path brings it back up.
	 */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
5281
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		    struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
#endif
{
	/* Only mqprio offload requests on the root qdisc are supported */
	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;
	return i40e_setup_tc(netdev, tc->tc);
}
5294
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	/* The TSOMSK registers take the TCP flag bits in the upper half,
	 * hence the be32_to_cpu(...) >> 16 shifts.
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	/* Ask the stack to replay any UDP tunnel port offloads */
	udp_tunnel_get_rx_info(netdev);

	return 0;
}
5337
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Allocates Tx/Rx descriptors, configures the VSI, requests IRQs and
 * brings the VSI up. On failure everything acquired so far is released
 * via the goto-unwind labels below; if the LAN VSI fails to open a full
 * PF reset is requested.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		/* FDIR VSI has no netdev; name the IRQ after the device */
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		/* Only netdev-backed and FDIR VSIs can be opened */
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
5412
5413 /**
5414  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5415  * @pf: Pointer to PF
5416  *
5417  * This function destroys the hlist where all the Flow Director
5418  * filters were saved.
5419  **/
5420 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5421 {
5422         struct i40e_fdir_filter *filter;
5423         struct hlist_node *node2;
5424
5425         hlist_for_each_entry_safe(filter, node2,
5426                                   &pf->fdir_filter_list, fdir_node) {
5427                 hlist_del(&filter->fdir_node);
5428                 kfree(filter);
5429         }
5430         pf->fdir_pf_active_filters = 0;
5431 }
5432
5433 /**
5434  * i40e_close - Disables a network interface
5435  * @netdev: network interface device structure
5436  *
5437  * The close entry point is called when an interface is de-activated
5438  * by the OS.  The hardware is still under the driver's control, but
5439  * this netdev interface is disabled.
5440  *
5441  * Returns 0, this is not allowed to fail
5442  **/
5443 int i40e_close(struct net_device *netdev)
5444 {
5445         struct i40e_netdev_priv *np = netdev_priv(netdev);
5446         struct i40e_vsi *vsi = np->vsi;
5447
5448         i40e_vsi_close(vsi);
5449
5450         return 0;
5451 }
5452
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single largest reset among the requested flags is performed;
 * an unrecognized request is logged and ignored.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
        u32 val;

        /* this path may sleep (e.g. i40e_down), so it must not run in IRQ
         * context
         */
        WARN_ON(in_interrupt());


        /* do the biggest reset indicated */
        if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

                /* Request a Global Reset
                 *
                 * This will start the chip's countdown to the actual full
                 * chip reset event, and a warning interrupt to be sent
                 * to all PFs, including the requestor.  Our handler
                 * for the warning interrupt will deal with the shutdown
                 * and recovery of the switch setup.
                 */
                dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

        } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

                /* Request a Core Reset
                 *
                 * Same as Global Reset, except does *not* include the MAC/PHY
                 */
                dev_dbg(&pf->pdev->dev, "CoreR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_CORER_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);

        } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

                /* Request a PF Reset
                 *
                 * Resets only the PF-specific registers
                 *
                 * This goes directly to the tear-down and rebuild of
                 * the switch, since we need to do all the recovery as
                 * for the Core Reset.
                 */
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);

        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;

                /* Find the VSI(s) that requested a re-init */
                dev_info(&pf->pdev->dev,
                         "VSI reinit requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];

                        if (vsi != NULL &&
                            test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
                                i40e_vsi_reinit_locked(pf->vsi[v]);
                                clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                        }
                }
        } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;

                /* Find the VSI(s) that needs to be brought down */
                dev_info(&pf->pdev->dev, "VSI down requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];

                        if (vsi != NULL &&
                            test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
                                /* mark it down before stopping it so nothing
                                 * tries to bring it back up concurrently
                                 */
                                set_bit(__I40E_DOWN, &vsi->state);
                                i40e_down(vsi);
                                clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                        }
                }
        } else {
                dev_info(&pf->pdev->dev,
                         "bad reset request 0x%08x\n", reset_flags);
        }
}
5545
5546 #ifdef CONFIG_I40E_DCB
5547 /**
5548  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5549  * @pf: board private structure
5550  * @old_cfg: current DCB config
5551  * @new_cfg: new DCB config
5552  **/
5553 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5554                             struct i40e_dcbx_config *old_cfg,
5555                             struct i40e_dcbx_config *new_cfg)
5556 {
5557         bool need_reconfig = false;
5558
5559         /* Check if ETS configuration has changed */
5560         if (memcmp(&new_cfg->etscfg,
5561                    &old_cfg->etscfg,
5562                    sizeof(new_cfg->etscfg))) {
5563                 /* If Priority Table has changed reconfig is needed */
5564                 if (memcmp(&new_cfg->etscfg.prioritytable,
5565                            &old_cfg->etscfg.prioritytable,
5566                            sizeof(new_cfg->etscfg.prioritytable))) {
5567                         need_reconfig = true;
5568                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5569                 }
5570
5571                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5572                            &old_cfg->etscfg.tcbwtable,
5573                            sizeof(new_cfg->etscfg.tcbwtable)))
5574                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5575
5576                 if (memcmp(&new_cfg->etscfg.tsatable,
5577                            &old_cfg->etscfg.tsatable,
5578                            sizeof(new_cfg->etscfg.tsatable)))
5579                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5580         }
5581
5582         /* Check if PFC configuration has changed */
5583         if (memcmp(&new_cfg->pfc,
5584                    &old_cfg->pfc,
5585                    sizeof(new_cfg->pfc))) {
5586                 need_reconfig = true;
5587                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5588         }
5589
5590         /* Check if APP Table has changed */
5591         if (memcmp(&new_cfg->app,
5592                    &old_cfg->app,
5593                    sizeof(new_cfg->app))) {
5594                 need_reconfig = true;
5595                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5596         }
5597
5598         dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5599         return need_reconfig;
5600 }
5601
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Refreshes the cached DCBX configuration from firmware when an LLDP MIB
 * change event arrives.  If the local configuration changed in a way that
 * needs it, all VSIs are quiesced, DCB is reconfigured, and port Tx is
 * resumed.  Returns 0 when there is nothing to do, otherwise the status
 * of the failing firmware call.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
                                  struct i40e_arq_event_info *e)
{
        struct i40e_aqc_lldp_get_mib *mib =
                (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_dcbx_config tmp_dcbx_cfg;
        bool need_reconfig = false;
        int ret = 0;
        u8 type;

        /* Not DCB capable or capability disabled */
        if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
                return ret;

        /* Ignore if event is not for Nearest Bridge */
        type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
                & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
        dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
        if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
                return ret;

        /* Check MIB Type and return if event for Remote MIB update */
        type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
        dev_dbg(&pf->pdev->dev,
                "LLDP event mib type %s\n", type ? "remote" : "local");
        if (type == I40E_AQ_LLDP_MIB_REMOTE) {
                /* Update the remote cached instance and return */
                ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
                                I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
                                &hw->remote_dcbx_config);
                goto exit;
        }

        /* Store the old configuration so it can be compared against below */
        tmp_dcbx_cfg = hw->local_dcbx_config;

        /* Reset the old DCBx configuration data */
        memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }

        /* No change detected in DCBX configs */
        if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
                    sizeof(tmp_dcbx_cfg))) {
                dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }

        need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
                                               &hw->local_dcbx_config);

        /* keep dcbnl's notion of the app table in sync with the new config */
        i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

        if (!need_reconfig)
                goto exit;

        /* Enable DCB tagging only when more than one TC */
        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                pf->flags |= I40E_FLAG_DCB_ENABLED;
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;

        set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* Reconfiguration needed quiesce all VSIs */
        i40e_pf_quiesce_all_vsi(pf);

        /* Changes in configuration update VEB/VSI */
        i40e_dcb_reconfigure(pf);

        ret = i40e_resume_port_tx(pf);

        clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* In case of error no point in resuming VSIs */
        if (ret)
                goto exit;

        /* Wait for the PF's queues to be disabled */
        ret = i40e_pf_wait_queues_disabled(pf);
        if (ret) {
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
        } else {
                i40e_pf_unquiesce_all_vsi(pf);
                /* Notify the client for the DCB changes */
                i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
        }

exit:
        return ret;
}
5706 #endif /* CONFIG_I40E_DCB */
5707
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Same as i40e_do_reset() but takes the RTNL lock around the reset so it
 * cannot race netdev operations initiated from user space.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
        rtnl_lock();
        i40e_do_reset(pf, reset_flags);
        rtnl_unlock();
}
5720
5721 /**
5722  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5723  * @pf: board private structure
5724  * @e: event info posted on ARQ
5725  *
5726  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5727  * and VF queues
5728  **/
5729 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5730                                            struct i40e_arq_event_info *e)
5731 {
5732         struct i40e_aqc_lan_overflow *data =
5733                 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5734         u32 queue = le32_to_cpu(data->prtdcb_rupto);
5735         u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5736         struct i40e_hw *hw = &pf->hw;
5737         struct i40e_vf *vf;
5738         u16 vf_id;
5739
5740         dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5741                 queue, qtx_ctl);
5742
5743         /* Queue belongs to VF, find the VF and issue VF reset */
5744         if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5745             >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5746                 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5747                          >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5748                 vf_id -= hw->func_caps.vf_base_id;
5749                 vf = &pf->vf[vf_id];
5750                 i40e_vc_notify_vf_reset(vf);
5751                 /* Allow VF to process pending reset notification */
5752                 msleep(20);
5753                 i40e_reset_vf(vf, false);
5754         }
5755 }
5756
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the service task can be scheduled again.
 * Warns if called when no service event was actually scheduled.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
        WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5769
5770 /**
5771  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5772  * @pf: board private structure
5773  **/
5774 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5775 {
5776         u32 val, fcnt_prog;
5777
5778         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5779         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5780         return fcnt_prog;
5781 }
5782
5783 /**
5784  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5785  * @pf: board private structure
5786  **/
5787 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5788 {
5789         u32 val, fcnt_prog;
5790
5791         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5792         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5793                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5794                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5795         return fcnt_prog;
5796 }
5797
5798 /**
5799  * i40e_get_global_fd_count - Get total FD filters programmed on device
5800  * @pf: board private structure
5801  **/
5802 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5803 {
5804         u32 val, fcnt_prog;
5805
5806         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5807         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5808                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5809                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5810         return fcnt_prog;
5811 }
5812
/**
 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
 * @pf: board private structure
 *
 * Re-enables Flow Director sideband and/or ATR if they were auto-disabled
 * and enough filter-table space has freed up, and removes from the local
 * list any filter the hardware reported as invalid.
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
        struct i40e_fdir_filter *filter;
        u32 fcnt_prog, fcnt_avail;
        struct hlist_node *node;

        /* a table flush is pending; don't touch anything until it's done */
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return;

        /* Check if FD SB or ATR was auto disabled and if there is enough room
         * to re-enable
         */
        fcnt_prog = i40e_get_global_fd_count(pf);
        fcnt_avail = pf->fdir_pf_filter_count;
        if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
            (pf->fd_add_err == 0) ||
            (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }

        /* Wait for some more space to be available to turn on ATR. We also
         * must check that no existing ntuple rules for TCP are in effect
         */
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->fd_tcp_rule == 0)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
                }
        }

        /* if hw had a problem adding a filter, delete it */
        if (pf->fd_inv > 0) {
                hlist_for_each_entry_safe(filter, node,
                                          &pf->fdir_filter_list, fdir_node) {
                        if (filter->fd_id == pf->fd_inv) {
                                hlist_del(&filter->fdir_node);
                                kfree(filter);
                                pf->fdir_pf_active_filters--;
                        }
                }
        }
}
5867
/* minimum seconds between full FD table flushes */
#define I40E_MIN_FD_FLUSH_INTERVAL 10
/* if flushes happen closer together than this (seconds), keep ATR off */
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the hardware Flow Director table, waits for the flush to
 * complete, and then re-programs the sideband filters from the driver's
 * list.  ATR is temporarily disabled during the flush and only re-enabled
 * if the table isn't churning too quickly.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
        unsigned long min_flush_time;
        int flush_wait_retry = 50;
        bool disable_atr = false;
        int fd_room;
        int reg;

        /* rate-limit flushes to once per I40E_MIN_FD_FLUSH_INTERVAL secs */
        if (!time_after(jiffies, pf->fd_flush_timestamp +
                                 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
                return;

        /* If the flush is happening too quick and we have mostly SB rules we
         * should not re-enable ATR for some time.
         */
        min_flush_time = pf->fd_flush_timestamp +
                         (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
        fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

        if (!(time_after(jiffies, min_flush_time)) &&
            (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
                disable_atr = true;
        }

        pf->fd_flush_timestamp = jiffies;
        /* hold ATR off while the table is being flushed */
        pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
        /* flush all filters */
        wr32(&pf->hw, I40E_PFQF_CTL_1,
             I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
        i40e_flush(&pf->hw);
        pf->fd_flush_cnt++;
        pf->fd_add_err = 0;
        do {
                /* Check FD flush status every 5-6msec */
                usleep_range(5000, 6000);
                reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
                if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
                        break;
        } while (flush_wait_retry--);
        if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
                /* flush did not complete within ~250-300ms; leave the
                 * FLUSH_REQUESTED bit set so we retry later
                 */
                dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
        } else {
                /* replay sideband filters */
                i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
                if (!disable_atr)
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
        }
}
5927
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 *
 * ATR count is everything programmed in hardware minus the sideband
 * filters the driver is tracking explicitly.
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
        return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
5936
5937 /* We can see up to 256 filter programming desc in transit if the filters are
5938  * being applied really fast; before we see the first
5939  * filter miss error on Rx queue 0. Accumulating enough error messages before
5940  * reacting will make sure we don't cause flush too often.
5941  */
5942 #define I40E_MAX_FD_PROGRAM_ERROR 256
5943
5944 /**
5945  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5946  * @pf: board private structure
5947  **/
5948 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5949 {
5950
5951         /* if interface is down do nothing */
5952         if (test_bit(__I40E_DOWN, &pf->state))
5953                 return;
5954
5955         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5956                 i40e_fdir_flush_and_replay(pf);
5957
5958         i40e_fdir_check_and_reenable(pf);
5959
5960 }
5961
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 *
 * Propagates carrier state to the VSI's netdev (main/FCoE VSIs only);
 * other VSI types have no netdev-level notification.
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
        if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
                return;

        switch (vsi->type) {
        case I40E_VSI_MAIN:
#ifdef I40E_FCOE
        case I40E_VSI_FCOE:
#endif
                /* only a registered netdev gets carrier/queue updates */
                if (!vsi->netdev || !vsi->netdev_registered)
                        break;

                if (link_up) {
                        netif_carrier_on(vsi->netdev);
                        netif_tx_wake_all_queues(vsi->netdev);
                } else {
                        netif_carrier_off(vsi->netdev);
                        netif_tx_stop_all_queues(vsi->netdev);
                }
                break;

        case I40E_VSI_SRIOV:
        case I40E_VSI_VMDQ2:
        case I40E_VSI_CTRL:
        case I40E_VSI_IWARP:
        case I40E_VSI_MIRROR:
        default:
                /* there is no notification for other VSIs */
                break;
        }
}
5999
6000 /**
6001  * i40e_veb_link_event - notify elements on the veb of a link event
6002  * @veb: veb to be notified
6003  * @link_up: link up or down
6004  **/
6005 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6006 {
6007         struct i40e_pf *pf;
6008         int i;
6009
6010         if (!veb || !veb->pf)
6011                 return;
6012         pf = veb->pf;
6013
6014         /* depth first... */
6015         for (i = 0; i < I40E_MAX_VEB; i++)
6016                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6017                         i40e_veb_link_event(pf->veb[i], link_up);
6018
6019         /* ... now the local VSIs */
6020         for (i = 0; i < pf->num_alloc_vsi; i++)
6021                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6022                         i40e_vsi_link_event(pf->vsi[i], link_up);
6023 }
6024
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Queries the current link state from firmware, and when it has changed,
 * pushes the new state down the switch tree, notifies VFs, and adjusts
 * the PTP increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        u8 new_link_speed, old_link_speed;
        i40e_status status;
        bool new_link, old_link;

        /* save off old link status information */
        pf->hw.phy.link_info_old = pf->hw.phy.link_info;

        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;

        old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

        status = i40e_get_link_status(&pf->hw, &new_link);
        if (status) {
                dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
                        status);
                return;
        }

        old_link_speed = pf->hw.phy.link_info_old.link_speed;
        new_link_speed = pf->hw.phy.link_info.link_speed;

        /* nothing to do if neither link state nor speed changed and the
         * carrier already agrees (or the VSI is down)
         * NOTE(review): netif_carrier_ok() assumes the main LAN VSI always
         * has a netdev here -- confirm against VSI setup
         */
        if (new_link == old_link &&
            new_link_speed == old_link_speed &&
            (test_bit(__I40E_DOWN, &vsi->state) ||
             new_link == netif_carrier_ok(vsi->netdev)))
                return;

        if (!test_bit(__I40E_DOWN, &vsi->state))
                i40e_print_link_message(vsi, new_link);

        /* Notify the base of the switch tree connected to
         * the link.  Floating VEBs are not notified.
         */
        if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
                i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
        else
                i40e_vsi_link_event(vsi, new_link);

        if (pf->vf)
                i40e_vc_notify_link_state(pf);

        if (pf->flags & I40E_FLAG_PTP)
                i40e_ptp_set_increment(pf);
}
6077
6078 /**
6079  * i40e_watchdog_subtask - periodic checks not using event driven response
6080  * @pf: board private structure
6081  **/
6082 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6083 {
6084         int i;
6085
6086         /* if interface is down do nothing */
6087         if (test_bit(__I40E_DOWN, &pf->state) ||
6088             test_bit(__I40E_CONFIG_BUSY, &pf->state))
6089                 return;
6090
6091         /* make sure we don't do these things too often */
6092         if (time_before(jiffies, (pf->service_timer_previous +
6093                                   pf->service_timer_period)))
6094                 return;
6095         pf->service_timer_previous = jiffies;
6096
6097         if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6098                 i40e_link_event(pf);
6099
6100         /* Update the stats for active netdevs so the network stack
6101          * can look at updated numbers whenever it cares to
6102          */
6103         for (i = 0; i < pf->num_alloc_vsi; i++)
6104                 if (pf->vsi[i] && pf->vsi[i]->netdev)
6105                         i40e_update_stats(pf->vsi[i]);
6106
6107         if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6108                 /* Update the stats for the active switching components */
6109                 for (i = 0; i < I40E_MAX_VEB; i++)
6110                         if (pf->veb[i])
6111                                 i40e_update_veb_stats(pf->veb[i]);
6112         }
6113
6114         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6115 }
6116
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects all pending reset requests from the PF state bits (clearing
 * each bit as it is collected) and, under the RTNL lock, either services
 * an already-received reset interrupt or kicks off the requested reset.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
        u32 reset_flags = 0;

        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }

        /* If there's a recovery already waiting, it takes
         * precedence before starting a new reset sequence.
         */
        if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
                i40e_handle_reset_warning(pf);
                goto unlock;
        }

        /* Start the requested reset only if something was requested and
         * we're not already down or mid-reconfiguration
         */
        if (reset_flags &&
            !test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_CONFIG_BUSY, &pf->state))
                i40e_do_reset(pf, reset_flags);

unlock:
        rtnl_unlock();
}
6164
6165 /**
6166  * i40e_handle_link_event - Handle link event
6167  * @pf: board private structure
6168  * @e: event info posted on ARQ
6169  **/
6170 static void i40e_handle_link_event(struct i40e_pf *pf,
6171                                    struct i40e_arq_event_info *e)
6172 {
6173         struct i40e_aqc_get_link_status *status =
6174                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6175
6176         /* Do a new status request to re-enable LSE reporting
6177          * and load new status information into the hw struct
6178          * This completely ignores any state information
6179          * in the ARQ event info, instead choosing to always
6180          * issue the AQ update link status command.
6181          */
6182         i40e_link_event(pf);
6183
6184         /* check for unqualified module, if link is down */
6185         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6186             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6187             (!(status->link_info & I40E_AQ_LINK_UP)))
6188                 dev_err(&pf->pdev->dev,
6189                         "The driver failed to link because an unqualified module was detected.\n");
6190 }
6191
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * First checks and clears any sticky ARQ/ASQ error indications in the
 * queue length registers, then drains pending receive-queue events up to
 * the adminq work limit, dispatching each by opcode, and finally
 * re-enables the AdminQ interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications on the receive (ARQ) side */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* write back only if some error bit was actually cleared */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same checks for the send (ASQ) side */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	/* one buffer is reused for every event pulled off the ring */
	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* drain the ARQ, bounded by the configured work limit */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* a VF mailbox message; cookie carries the VF opcode */
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* DCB reconfiguration may touch netdevs, so it
			 * needs the rtnl lock
			 */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			/* NVM completions only matter for debug tracing */
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
6319
6320 /**
6321  * i40e_verify_eeprom - make sure eeprom is good to use
6322  * @pf: board private structure
6323  **/
6324 static void i40e_verify_eeprom(struct i40e_pf *pf)
6325 {
6326         int err;
6327
6328         err = i40e_diag_eeprom_test(&pf->hw);
6329         if (err) {
6330                 /* retry in case of garbage read */
6331                 err = i40e_diag_eeprom_test(&pf->hw);
6332                 if (err) {
6333                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6334                                  err);
6335                         set_bit(__I40E_BAD_EEPROM, &pf->state);
6336                 }
6337         }
6338
6339         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6340                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6341                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6342         }
6343 }
6344
6345 /**
6346  * i40e_enable_pf_switch_lb
6347  * @pf: pointer to the PF structure
6348  *
6349  * enable switch loop back or die - no point in a return value
6350  **/
6351 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6352 {
6353         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6354         struct i40e_vsi_context ctxt;
6355         int ret;
6356
6357         ctxt.seid = pf->main_vsi_seid;
6358         ctxt.pf_num = pf->hw.pf_id;
6359         ctxt.vf_num = 0;
6360         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6361         if (ret) {
6362                 dev_info(&pf->pdev->dev,
6363                          "couldn't get PF vsi config, err %s aq_err %s\n",
6364                          i40e_stat_str(&pf->hw, ret),
6365                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6366                 return;
6367         }
6368         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6369         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6370         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6371
6372         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6373         if (ret) {
6374                 dev_info(&pf->pdev->dev,
6375                          "update vsi switch failed, err %s aq_err %s\n",
6376                          i40e_stat_str(&pf->hw, ret),
6377                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6378         }
6379 }
6380
6381 /**
6382  * i40e_disable_pf_switch_lb
6383  * @pf: pointer to the PF structure
6384  *
6385  * disable switch loop back or die - no point in a return value
6386  **/
6387 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6388 {
6389         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6390         struct i40e_vsi_context ctxt;
6391         int ret;
6392
6393         ctxt.seid = pf->main_vsi_seid;
6394         ctxt.pf_num = pf->hw.pf_id;
6395         ctxt.vf_num = 0;
6396         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6397         if (ret) {
6398                 dev_info(&pf->pdev->dev,
6399                          "couldn't get PF vsi config, err %s aq_err %s\n",
6400                          i40e_stat_str(&pf->hw, ret),
6401                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6402                 return;
6403         }
6404         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6405         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6406         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6407
6408         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6409         if (ret) {
6410                 dev_info(&pf->pdev->dev,
6411                          "update vsi switch failed, err %s aq_err %s\n",
6412                          i40e_stat_str(&pf->hw, ret),
6413                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6414         }
6415 }
6416
6417 /**
6418  * i40e_config_bridge_mode - Configure the HW bridge mode
6419  * @veb: pointer to the bridge instance
6420  *
6421  * Configure the loop back mode for the LAN VSI that is downlink to the
6422  * specified HW bridge instance. It is expected this function is called
6423  * when a new HW bridge is instantiated.
6424  **/
6425 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6426 {
6427         struct i40e_pf *pf = veb->pf;
6428
6429         if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6430                 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6431                          veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6432         if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6433                 i40e_disable_pf_switch_lb(pf);
6434         else
6435                 i40e_enable_pf_switch_lb(pf);
6436 }
6437
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, or the nonzero error code of the failing step.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	/* (the !ctl_vsi loop condition is redundant with the break below) */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-main owner temporarily hangs off the main VSI's uplink */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* restore the bridge mode that was configured before the reset */
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			/* reattach under the freshly created VEB's seid */
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6524
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Issues the discover-capabilities AQ command, growing the response
 * buffer and retrying if firmware reports it was too small.  The AQ
 * layer loads the parsed capabilities into pf->hw.func_caps.
 *
 * Returns 0 on success, -ENOMEM or -ENODEV on failure.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	/* start with room for 40 capability records */
	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
		/* NOTE(review): if err stays nonzero while asq_last_status
		 * reads I40E_AQ_RC_OK this would retry forever with the same
		 * buf_len -- presumably the AQ layer never reports that
		 * combination; confirm against i40e_aq_discover_capabilities.
		 */
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

	/* on A0 hardware, make sure the reported VSI budget covers the
	 * main VSI, optional FCoE VSI, and one VSI per VF
	 */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6585
6586 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6587
6588 /**
6589  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6590  * @pf: board private structure
6591  **/
6592 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6593 {
6594         struct i40e_vsi *vsi;
6595         int i;
6596
6597         /* quick workaround for an NVM issue that leaves a critical register
6598          * uninitialized
6599          */
6600         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6601                 static const u32 hkey[] = {
6602                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6603                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6604                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6605                         0x95b3a76d};
6606
6607                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6608                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6609         }
6610
6611         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6612                 return;
6613
6614         /* find existing VSI and see if it needs configuring */
6615         vsi = NULL;
6616         for (i = 0; i < pf->num_alloc_vsi; i++) {
6617                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6618                         vsi = pf->vsi[i];
6619                         break;
6620                 }
6621         }
6622
6623         /* create a new VSI if none exists */
6624         if (!vsi) {
6625                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6626                                      pf->vsi[pf->lan_vsi]->seid, 0);
6627                 if (!vsi) {
6628                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6629                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6630                         return;
6631                 }
6632         }
6633
6634         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6635 }
6636
6637 /**
6638  * i40e_fdir_teardown - release the Flow Director resources
6639  * @pf: board private structure
6640  **/
6641 static void i40e_fdir_teardown(struct i40e_pf *pf)
6642 {
6643         int i;
6644
6645         i40e_fdir_filter_exit(pf);
6646         for (i = 0; i < pf->num_alloc_vsi; i++) {
6647                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6648                         i40e_vsi_release(pf->vsi[i]);
6649                         break;
6650                 }
6651         }
6652 }
6653
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* bail if a reset/rebuild is already in progress */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;
	/* only notify the VFs if the send queue is still usable */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* forget stale switch element IDs; the post-reset rebuild gets
	 * fresh seid's from the hardware
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
6692
6693 /**
6694  * i40e_send_version - update firmware with driver version
6695  * @pf: PF struct
6696  */
6697 static void i40e_send_version(struct i40e_pf *pf)
6698 {
6699         struct i40e_driver_version dv;
6700
6701         dv.major_version = DRV_VERSION_MAJOR;
6702         dv.minor_version = DRV_VERSION_MINOR;
6703         dv.build_version = DRV_VERSION_BUILD;
6704         dv.subbuild_version = 0;
6705         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6706         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6707 }
6708
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Issues a PF reset, then rebuilds the AdminQ, HMC, internal switch,
 * VSIs/VEBs, and interrupts in strict order, restarting traffic and
 * resetting all VFs once the rebuild succeeds.  On an early failure it
 * simply clears the recovery-pending state and returns.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	/* don't rebuild if the driver is going down anyway */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	/* re-create the host memory cache the hardware DMAs context into */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * try to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		/* give the link a moment to settle before restarting AN */
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* VFs must be told their backing resources were rebuilt */
	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
6911
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	/* Quiesce traffic and release HW resources first, then perform the
	 * core reset and rebuild.  The 'false' argument presumably requests
	 * a rebuild without full reinit -- confirm against the
	 * i40e_reset_and_rebuild() definition earlier in this file.
	 */
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}
6924
/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection (MDD) event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* nothing to do unless the ISR latched an MDD cause */
	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* decode the offending PF/VF/event from the global Tx
		 * detection register
		 */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		/* HW reports an absolute queue number; convert it to an
		 * index relative to this function's base queue
		 */
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		/* clear the latched event so a new one can be recorded */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		/* same absolute-to-relative queue conversion as the Tx path */
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* check whether the event was attributed to this PF itself; if so a
	 * PF reset is the only recovery
	 */
	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped; only worth scanning
	 * when a global event was actually latched above
	 */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		/* a repeat offender is disabled until the admin intervenes */
		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
7032
7033 /**
7034  * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7035  * @pf: board private structure
7036  **/
7037 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7038 {
7039         struct i40e_hw *hw = &pf->hw;
7040         i40e_status ret;
7041         __be16 port;
7042         int i;
7043
7044         if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7045                 return;
7046
7047         pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7048
7049         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7050                 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7051                         pf->pending_udp_bitmap &= ~BIT_ULL(i);
7052                         port = pf->udp_ports[i].index;
7053                         if (port)
7054                                 ret = i40e_aq_add_udp_tunnel(hw, port,
7055                                                         pf->udp_ports[i].type,
7056                                                         NULL, NULL);
7057                         else
7058                                 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7059
7060                         if (ret) {
7061                                 dev_dbg(&pf->pdev->dev,
7062                                         "%s %s port %d, index %d failed, err %s aq_err %s\n",
7063                                         pf->udp_ports[i].type ? "vxlan" : "geneve",
7064                                         port ? "add" : "delete",
7065                                         ntohs(port), i,
7066                                         i40e_stat_str(&pf->hw, ret),
7067                                         i40e_aq_str(&pf->hw,
7068                                                     pf->hw.aq.asq_last_status));
7069                                 pf->udp_ports[i].index = 0;
7070                         }
7071                 }
7072         }
7073 }
7074
7075 /**
7076  * i40e_service_task - Run the driver's async subtasks
7077  * @work: pointer to work_struct containing our data
7078  **/
7079 static void i40e_service_task(struct work_struct *work)
7080 {
7081         struct i40e_pf *pf = container_of(work,
7082                                           struct i40e_pf,
7083                                           service_task);
7084         unsigned long start_time = jiffies;
7085
7086         /* don't bother with service tasks if a reset is in progress */
7087         if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7088                 i40e_service_event_complete(pf);
7089                 return;
7090         }
7091
7092         i40e_detect_recover_hung(pf);
7093         i40e_sync_filters_subtask(pf);
7094         i40e_reset_subtask(pf);
7095         i40e_handle_mdd_event(pf);
7096         i40e_vc_process_vflr_event(pf);
7097         i40e_watchdog_subtask(pf);
7098         i40e_fdir_reinit_subtask(pf);
7099         i40e_client_subtask(pf);
7100         i40e_sync_filters_subtask(pf);
7101         i40e_sync_udp_filters_subtask(pf);
7102         i40e_clean_adminq_subtask(pf);
7103
7104         i40e_service_event_complete(pf);
7105
7106         /* If the tasks have taken longer than one timer cycle or there
7107          * is more work to be done, reschedule the service task now
7108          * rather than wait for the timer to tick again.
7109          */
7110         if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7111             test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
7112             test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
7113             test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7114                 i40e_service_event_schedule(pf);
7115 }
7116
/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 *
 * Periodic tick: re-arms the timer for the next service interval and
 * schedules the service task so the subtasks get a chance to run.
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	/* round_jiffies() aligns the expiry with other timers to batch
	 * wakeups onto the same tick
	 */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
7129
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 *
 * Fills in alloc_queue_pairs, num_desc and (where applicable)
 * num_q_vectors from the PF-level resource counts, based on the VSI
 * type.  Returns 0 on success, -ENODATA for an unknown VSI type.
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		/* without MSI-X everything shares a single vector */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		/* sideband flow director uses a single queue pair */
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		/* VF vectors are managed by the VF driver, so no
		 * num_q_vectors is set here
		 */
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
7186
7187 /**
7188  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7189  * @type: VSI pointer
7190  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7191  *
7192  * On error: returns error code (negative)
7193  * On success: returns 0
7194  **/
7195 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7196 {
7197         int size;
7198         int ret = 0;
7199
7200         /* allocate memory for both Tx and Rx ring pointers */
7201         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7202         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7203         if (!vsi->tx_rings)
7204                 return -ENOMEM;
7205         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7206
7207         if (alloc_qvectors) {
7208                 /* allocate memory for q_vector pointers */
7209                 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7210                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7211                 if (!vsi->q_vectors) {
7212                         ret = -ENOMEM;
7213                         goto err_vectors;
7214                 }
7215         }
7216         return ret;
7217
7218 err_vectors:
7219         kfree(vsi->tx_rings);
7220         return ret;
7221 }
7222
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrap: rescan the slots before the starting hint */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* remember where to start the next search */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* initialize the new VSI as down until it is fully configured */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	/* only the main VSI gets the full PF RSS table */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* i was advanced past the claimed slot above; i - 1 restores the
	 * hint to the slot we are giving back
	 */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7306
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI whose pointer arrays are being freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	/* rx_rings points into the tx_rings allocation, so a single kfree
	 * releases both; just NULL the aliasing pointer
	 */
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}
7326
7327 /**
7328  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7329  * and lookup table
7330  * @vsi: Pointer to VSI structure
7331  */
7332 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7333 {
7334         if (!vsi)
7335                 return;
7336
7337         kfree(vsi->rss_hkey_user);
7338         vsi->rss_hkey_user = NULL;
7339
7340         kfree(vsi->rss_lut_user);
7341         vsi->rss_lut_user = NULL;
7342 }
7343
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Releases the VSI's queue/vector resources back to the PF piles,
 * frees its arrays and finally the VSI structure itself.  Tolerates
 * NULL and detached VSIs.  Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no back-pointer was never registered with the PF;
	 * just free the structure
	 */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check: the slot must hold this exact VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	/* let the next allocation scan start from this freed slot */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
7394
7395 /**
7396  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7397  * @vsi: the VSI being cleaned
7398  **/
7399 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7400 {
7401         int i;
7402
7403         if (vsi->tx_rings && vsi->tx_rings[0]) {
7404                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7405                         kfree_rcu(vsi->tx_rings[i], rcu);
7406                         vsi->tx_rings[i] = NULL;
7407                         vsi->rx_rings[i] = NULL;
7408                 }
7409         }
7410 }
7411
7412 /**
7413  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7414  * @vsi: the VSI being configured
7415  **/
7416 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7417 {
7418         struct i40e_ring *tx_ring, *rx_ring;
7419         struct i40e_pf *pf = vsi->back;
7420         int i;
7421
7422         /* Set basic values in the rings to be used later during open() */
7423         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7424                 /* allocate space for both Tx and Rx in one shot */
7425                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7426                 if (!tx_ring)
7427                         goto err_out;
7428
7429                 tx_ring->queue_index = i;
7430                 tx_ring->reg_idx = vsi->base_queue + i;
7431                 tx_ring->ring_active = false;
7432                 tx_ring->vsi = vsi;
7433                 tx_ring->netdev = vsi->netdev;
7434                 tx_ring->dev = &pf->pdev->dev;
7435                 tx_ring->count = vsi->num_desc;
7436                 tx_ring->size = 0;
7437                 tx_ring->dcb_tc = 0;
7438                 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7439                         tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7440                 tx_ring->tx_itr_setting = pf->tx_itr_default;
7441                 vsi->tx_rings[i] = tx_ring;
7442
7443                 rx_ring = &tx_ring[1];
7444                 rx_ring->queue_index = i;
7445                 rx_ring->reg_idx = vsi->base_queue + i;
7446                 rx_ring->ring_active = false;
7447                 rx_ring->vsi = vsi;
7448                 rx_ring->netdev = vsi->netdev;
7449                 rx_ring->dev = &pf->pdev->dev;
7450                 rx_ring->count = vsi->num_desc;
7451                 rx_ring->size = 0;
7452                 rx_ring->dcb_tc = 0;
7453                 rx_ring->rx_itr_setting = pf->rx_itr_default;
7454                 vsi->rx_rings[i] = rx_ring;
7455         }
7456
7457         return 0;
7458
7459 err_out:
7460         i40e_vsi_clear_rings(vsi);
7461         return -ENOMEM;
7462 }
7463
7464 /**
7465  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7466  * @pf: board private structure
7467  * @vectors: the number of MSI-X vectors to request
7468  *
7469  * Returns the number of vectors reserved, or error
7470  **/
7471 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7472 {
7473         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7474                                         I40E_MIN_MSIX, vectors);
7475         if (vectors < 0) {
7476                 dev_info(&pf->pdev->dev,
7477                          "MSI-X vector reservation failed: %d\n", vectors);
7478                 vectors = 0;
7479         }
7480
7481         return vectors;
7482 }
7483
7484 /**
7485  * i40e_init_msix - Setup the MSIX capability
7486  * @pf: board private structure
7487  *
7488  * Work with the OS to set up the MSIX vectors needed.
7489  *
7490  * Returns the number of vectors reserved or negative on failure
7491  **/
7492 static int i40e_init_msix(struct i40e_pf *pf)
7493 {
7494         struct i40e_hw *hw = &pf->hw;
7495         int vectors_left;
7496         int v_budget, i;
7497         int v_actual;
7498         int iwarp_requested = 0;
7499
7500         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7501                 return -ENODEV;
7502
7503         /* The number of vectors we'll request will be comprised of:
7504          *   - Add 1 for "other" cause for Admin Queue events, etc.
7505          *   - The number of LAN queue pairs
7506          *      - Queues being used for RSS.
7507          *              We don't need as many as max_rss_size vectors.
7508          *              use rss_size instead in the calculation since that
7509          *              is governed by number of cpus in the system.
7510          *      - assumes symmetric Tx/Rx pairing
7511          *   - The number of VMDq pairs
7512          *   - The CPU count within the NUMA node if iWARP is enabled
7513 #ifdef I40E_FCOE
7514          *   - The number of FCOE qps.
7515 #endif
7516          * Once we count this up, try the request.
7517          *
7518          * If we can't get what we want, we'll simplify to nearly nothing
7519          * and try again.  If that still fails, we punt.
7520          */
7521         vectors_left = hw->func_caps.num_msix_vectors;
7522         v_budget = 0;
7523
7524         /* reserve one vector for miscellaneous handler */
7525         if (vectors_left) {
7526                 v_budget++;
7527                 vectors_left--;
7528         }
7529
7530         /* reserve vectors for the main PF traffic queues */
7531         pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7532         vectors_left -= pf->num_lan_msix;
7533         v_budget += pf->num_lan_msix;
7534
7535         /* reserve one vector for sideband flow director */
7536         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7537                 if (vectors_left) {
7538                         pf->num_fdsb_msix = 1;
7539                         v_budget++;
7540                         vectors_left--;
7541                 } else {
7542                         pf->num_fdsb_msix = 0;
7543                 }
7544         }
7545
7546 #ifdef I40E_FCOE
7547         /* can we reserve enough for FCoE? */
7548         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7549                 if (!vectors_left)
7550                         pf->num_fcoe_msix = 0;
7551                 else if (vectors_left >= pf->num_fcoe_qps)
7552                         pf->num_fcoe_msix = pf->num_fcoe_qps;
7553                 else
7554                         pf->num_fcoe_msix = 1;
7555                 v_budget += pf->num_fcoe_msix;
7556                 vectors_left -= pf->num_fcoe_msix;
7557         }
7558
7559 #endif
7560         /* can we reserve enough for iWARP? */
7561         if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7562                 iwarp_requested = pf->num_iwarp_msix;
7563
7564                 if (!vectors_left)
7565                         pf->num_iwarp_msix = 0;
7566                 else if (vectors_left < pf->num_iwarp_msix)
7567                         pf->num_iwarp_msix = 1;
7568                 v_budget += pf->num_iwarp_msix;
7569                 vectors_left -= pf->num_iwarp_msix;
7570         }
7571
7572         /* any vectors left over go for VMDq support */
7573         if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7574                 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7575                 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7576
7577                 if (!vectors_left) {
7578                         pf->num_vmdq_msix = 0;
7579                         pf->num_vmdq_qps = 0;
7580                 } else {
7581                         /* if we're short on vectors for what's desired, we limit
7582                          * the queues per vmdq.  If this is still more than are
7583                          * available, the user will need to change the number of
7584                          * queues/vectors used by the PF later with the ethtool
7585                          * channels command
7586                          */
7587                         if (vmdq_vecs < vmdq_vecs_wanted)
7588                                 pf->num_vmdq_qps = 1;
7589                         pf->num_vmdq_msix = pf->num_vmdq_qps;
7590
7591                         v_budget += vmdq_vecs;
7592                         vectors_left -= vmdq_vecs;
7593                 }
7594         }
7595
7596         pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7597                                    GFP_KERNEL);
7598         if (!pf->msix_entries)
7599                 return -ENOMEM;
7600
7601         for (i = 0; i < v_budget; i++)
7602                 pf->msix_entries[i].entry = i;
7603         v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7604
7605         if (v_actual < I40E_MIN_MSIX) {
7606                 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7607                 kfree(pf->msix_entries);
7608                 pf->msix_entries = NULL;
7609                 pci_disable_msix(pf->pdev);
7610                 return -ENODEV;
7611
7612         } else if (v_actual == I40E_MIN_MSIX) {
7613                 /* Adjust for minimal MSIX use */
7614                 pf->num_vmdq_vsis = 0;
7615                 pf->num_vmdq_qps = 0;
7616                 pf->num_lan_qps = 1;
7617                 pf->num_lan_msix = 1;
7618
7619         } else if (!vectors_left) {
7620                 /* If we have limited resources, we will start with no vectors
7621                  * for the special features and then allocate vectors to some
7622                  * of these features based on the policy and at the end disable
7623                  * the features that did not get any vectors.
7624                  */
7625                 int vec;
7626
7627                 dev_info(&pf->pdev->dev,
7628                          "MSI-X vector limit reached, attempting to redistribute vectors\n");
7629                 /* reserve the misc vector */
7630                 vec = v_actual - 1;
7631
7632                 /* Scale vector usage down */
7633                 pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7634                 pf->num_vmdq_vsis = 1;
7635                 pf->num_vmdq_qps = 1;
7636 #ifdef I40E_FCOE
7637                 pf->num_fcoe_qps = 0;
7638                 pf->num_fcoe_msix = 0;
7639 #endif
7640
7641                 /* partition out the remaining vectors */
7642                 switch (vec) {
7643                 case 2:
7644                         pf->num_lan_msix = 1;
7645                         break;
7646                 case 3:
7647                         if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7648                                 pf->num_lan_msix = 1;
7649                                 pf->num_iwarp_msix = 1;
7650                         } else {
7651                                 pf->num_lan_msix = 2;
7652                         }
7653 #ifdef I40E_FCOE
7654                         /* give one vector to FCoE */
7655                         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7656                                 pf->num_lan_msix = 1;
7657                                 pf->num_fcoe_msix = 1;
7658                         }
7659 #endif
7660                         break;
7661                 default:
7662                         if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7663                                 pf->num_iwarp_msix = min_t(int, (vec / 3),
7664                                                  iwarp_requested);
7665                                 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7666                                                   I40E_DEFAULT_NUM_VMDQ_VSI);
7667                         } else {
7668                                 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7669                                                   I40E_DEFAULT_NUM_VMDQ_VSI);
7670                         }
7671                         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7672                                 pf->num_fdsb_msix = 1;
7673                                 vec--;
7674                         }
7675                         pf->num_lan_msix = min_t(int,
7676                                (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7677                                                               pf->num_lan_msix);
7678                         pf->num_lan_qps = pf->num_lan_msix;
7679 #ifdef I40E_FCOE
7680                         /* give one vector to FCoE */
7681                         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7682                                 pf->num_fcoe_msix = 1;
7683                                 vec--;
7684                         }
7685 #endif
7686                         break;
7687                 }
7688         }
7689
7690         if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
7691             (pf->num_fdsb_msix == 0)) {
7692                 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
7693                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7694         }
7695         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7696             (pf->num_vmdq_msix == 0)) {
7697                 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7698                 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7699         }
7700
7701         if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7702             (pf->num_iwarp_msix == 0)) {
7703                 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7704                 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7705         }
7706 #ifdef I40E_FCOE
7707
7708         if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7709                 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7710                 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7711         }
7712 #endif
7713         i40e_debug(&pf->hw, I40E_DEBUG_INIT,
7714                    "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
7715                    pf->num_lan_msix,
7716                    pf->num_vmdq_msix * pf->num_vmdq_vsis,
7717                    pf->num_fdsb_msix,
7718                    pf->num_iwarp_msix);
7719
7720         return v_actual;
7721 }
7722
7723 /**
7724  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7725  * @vsi: the VSI being configured
7726  * @v_idx: index of the vector in the vsi struct
7727  * @cpu: cpu to be used on affinity_mask
7728  *
7729  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7730  **/
7731 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
7732 {
7733         struct i40e_q_vector *q_vector;
7734
7735         /* allocate q_vector */
7736         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7737         if (!q_vector)
7738                 return -ENOMEM;
7739
7740         q_vector->vsi = vsi;
7741         q_vector->v_idx = v_idx;
7742         cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7743
7744         if (vsi->netdev)
7745                 netif_napi_add(vsi->netdev, &q_vector->napi,
7746                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7747
7748         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7749         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7750
7751         /* tie q_vector and vsi together */
7752         vsi->q_vectors[v_idx] = q_vector;
7753
7754         return 0;
7755 }
7756
7757 /**
7758  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7759  * @vsi: the VSI being configured
7760  *
7761  * We allocate one q_vector per queue interrupt.  If allocation fails we
7762  * return -ENOMEM.
7763  **/
7764 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7765 {
7766         struct i40e_pf *pf = vsi->back;
7767         int err, v_idx, num_q_vectors, current_cpu;
7768
7769         /* if not MSIX, give the one vector only to the LAN VSI */
7770         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7771                 num_q_vectors = vsi->num_q_vectors;
7772         else if (vsi == pf->vsi[pf->lan_vsi])
7773                 num_q_vectors = 1;
7774         else
7775                 return -EINVAL;
7776
7777         current_cpu = cpumask_first(cpu_online_mask);
7778
7779         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7780                 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
7781                 if (err)
7782                         goto err_out;
7783                 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7784                 if (unlikely(current_cpu >= nr_cpu_ids))
7785                         current_cpu = cpumask_first(cpu_online_mask);
7786         }
7787
7788         return 0;
7789
7790 err_out:
7791         while (v_idx--)
7792                 i40e_free_q_vector(vsi, v_idx);
7793
7794         return err;
7795 }
7796
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, falls back to MSI, and finally to a single legacy
 * IRQ.  Also allocates the irq_pile tracker used to hand out vectors
 * and reserves vector 0 for the misc (non-queue) interrupt.
 *
 * Returns 0 on success, -ENOMEM if the tracker cannot be allocated.
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			/* MSI-X setup failed: strip every feature that
			 * needs multiple vectors before falling back.
			 */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		/* whether MSI succeeded or we drop to legacy, there is
		 * exactly one vector to track
		 */
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7857
7858 /**
7859  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7860  * @pf: board private structure
7861  *
7862  * This sets up the handler for MSIX 0, which is used to manage the
7863  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7864  * when in MSI or Legacy interrupt mode.
7865  **/
7866 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7867 {
7868         struct i40e_hw *hw = &pf->hw;
7869         int err = 0;
7870
7871         /* Only request the irq if this is the first time through, and
7872          * not when we're rebuilding after a Reset
7873          */
7874         if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7875                 err = request_irq(pf->msix_entries[0].vector,
7876                                   i40e_intr, 0, pf->int_name, pf);
7877                 if (err) {
7878                         dev_info(&pf->pdev->dev,
7879                                  "request_irq for %s failed: %d\n",
7880                                  pf->int_name, err);
7881                         return -EFAULT;
7882                 }
7883         }
7884
7885         i40e_enable_misc_int_causes(pf);
7886
7887         /* associate no queues to the misc vector */
7888         wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7889         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7890
7891         i40e_flush(hw);
7892
7893         i40e_irq_dynamic_enable_icr0(pf, true);
7894
7895         return err;
7896 }
7897
7898 /**
7899  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7900  * @vsi: vsi structure
7901  * @seed: RSS hash seed
7902  **/
7903 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7904                               u8 *lut, u16 lut_size)
7905 {
7906         struct i40e_pf *pf = vsi->back;
7907         struct i40e_hw *hw = &pf->hw;
7908         int ret = 0;
7909
7910         if (seed) {
7911                 struct i40e_aqc_get_set_rss_key_data *seed_dw =
7912                         (struct i40e_aqc_get_set_rss_key_data *)seed;
7913                 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
7914                 if (ret) {
7915                         dev_info(&pf->pdev->dev,
7916                                  "Cannot set RSS key, err %s aq_err %s\n",
7917                                  i40e_stat_str(hw, ret),
7918                                  i40e_aq_str(hw, hw->aq.asq_last_status));
7919                         return ret;
7920                 }
7921         }
7922         if (lut) {
7923                 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
7924
7925                 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7926                 if (ret) {
7927                         dev_info(&pf->pdev->dev,
7928                                  "Cannot set RSS lut, err %s aq_err %s\n",
7929                                  i40e_stat_str(hw, ret),
7930                                  i40e_aq_str(hw, hw->aq.asq_last_status));
7931                         return ret;
7932                 }
7933         }
7934         return ret;
7935 }
7936
7937 /**
7938  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
7939  * @vsi: Pointer to vsi structure
7940  * @seed: Buffter to store the hash keys
7941  * @lut: Buffer to store the lookup table entries
7942  * @lut_size: Size of buffer to store the lookup table entries
7943  *
7944  * Return 0 on success, negative on failure
7945  */
7946 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7947                            u8 *lut, u16 lut_size)
7948 {
7949         struct i40e_pf *pf = vsi->back;
7950         struct i40e_hw *hw = &pf->hw;
7951         int ret = 0;
7952
7953         if (seed) {
7954                 ret = i40e_aq_get_rss_key(hw, vsi->id,
7955                         (struct i40e_aqc_get_set_rss_key_data *)seed);
7956                 if (ret) {
7957                         dev_info(&pf->pdev->dev,
7958                                  "Cannot get RSS key, err %s aq_err %s\n",
7959                                  i40e_stat_str(&pf->hw, ret),
7960                                  i40e_aq_str(&pf->hw,
7961                                              pf->hw.aq.asq_last_status));
7962                         return ret;
7963                 }
7964         }
7965
7966         if (lut) {
7967                 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
7968
7969                 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7970                 if (ret) {
7971                         dev_info(&pf->pdev->dev,
7972                                  "Cannot get RSS lut, err %s aq_err %s\n",
7973                                  i40e_stat_str(&pf->hw, ret),
7974                                  i40e_aq_str(&pf->hw,
7975                                              pf->hw.aq.asq_last_status));
7976                         return ret;
7977                 }
7978         }
7979
7980         return ret;
7981 }
7982
7983 /**
7984  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7985  * @vsi: VSI structure
7986  **/
7987 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7988 {
7989         u8 seed[I40E_HKEY_ARRAY_SIZE];
7990         struct i40e_pf *pf = vsi->back;
7991         u8 *lut;
7992         int ret;
7993
7994         if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
7995                 return 0;
7996
7997         if (!vsi->rss_size)
7998                 vsi->rss_size = min_t(int, pf->alloc_rss_size,
7999                                       vsi->num_queue_pairs);
8000         if (!vsi->rss_size)
8001                 return -EINVAL;
8002
8003         lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8004         if (!lut)
8005                 return -ENOMEM;
8006         /* Use the user configured hash keys and lookup table if there is one,
8007          * otherwise use default
8008          */
8009         if (vsi->rss_lut_user)
8010                 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8011         else
8012                 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8013         if (vsi->rss_hkey_user)
8014                 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8015         else
8016                 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8017         ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8018         kfree(lut);
8019
8020         return ret;
8021 }
8022
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Programs the RSS hash key and/or lookup table directly through the PF
 * or per-VF register sets depending on the VSI type.  Either @seed or
 * @lut may be NULL to skip that half of the configuration.
 *
 * NOTE(review): the buffers are accessed as u32 words through casts, so
 * callers presumably guarantee 4-byte alignment — confirm.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
						  seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			/* program the per-VF key registers on the VF's behalf */
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HKEY1(i, vf_id),
						  seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			/* caller's buffer must exactly match the PF LUT size */
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			/* NOTE(review): PF LUT uses wr32 while the other
			 * paths use i40e_write_rx_ctl — presumably HLUT is
			 * not an RX control register here; confirm.
			 */
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HLUT1(i, vf_id),
						  lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
8081
8082 /**
8083  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8084  * @vsi: Pointer to VSI structure
8085  * @seed: Buffer to store the keys
8086  * @lut: Buffer to store the lookup table entries
8087  * @lut_size: Size of buffer to store the lookup table entries
8088  *
8089  * Returns 0 on success, negative on failure
8090  */
8091 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8092                             u8 *lut, u16 lut_size)
8093 {
8094         struct i40e_pf *pf = vsi->back;
8095         struct i40e_hw *hw = &pf->hw;
8096         u16 i;
8097
8098         if (seed) {
8099                 u32 *seed_dw = (u32 *)seed;
8100
8101                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8102                         seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8103         }
8104         if (lut) {
8105                 u32 *lut_dw = (u32 *)lut;
8106
8107                 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8108                         return -EINVAL;
8109                 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8110                         lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8111         }
8112
8113         return 0;
8114 }
8115
8116 /**
8117  * i40e_config_rss - Configure RSS keys and lut
8118  * @vsi: Pointer to VSI structure
8119  * @seed: RSS hash seed
8120  * @lut: Lookup table
8121  * @lut_size: Lookup table size
8122  *
8123  * Returns 0 on success, negative on failure
8124  */
8125 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8126 {
8127         struct i40e_pf *pf = vsi->back;
8128
8129         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8130                 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8131         else
8132                 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8133 }
8134
8135 /**
8136  * i40e_get_rss - Get RSS keys and lut
8137  * @vsi: Pointer to VSI structure
8138  * @seed: Buffer to store the keys
8139  * @lut: Buffer to store the lookup table entries
8140  * lut_size: Size of buffer to store the lookup table entries
8141  *
8142  * Returns 0 on success, negative on failure
8143  */
8144 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8145 {
8146         struct i40e_pf *pf = vsi->back;
8147
8148         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8149                 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8150         else
8151                 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8152 }
8153
8154 /**
8155  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8156  * @pf: Pointer to board private structure
8157  * @lut: Lookup table
8158  * @rss_table_size: Lookup table size
8159  * @rss_size: Range of queue number for hashing
8160  */
8161 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8162                        u16 rss_table_size, u16 rss_size)
8163 {
8164         u16 i;
8165
8166         for (i = 0; i < rss_table_size; i++)
8167                 lut[i] = i % rss_size;
8168 }
8169
8170 /**
8171  * i40e_pf_config_rss - Prepare for RSS if used
8172  * @pf: board private structure
8173  **/
8174 static int i40e_pf_config_rss(struct i40e_pf *pf)
8175 {
8176         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8177         u8 seed[I40E_HKEY_ARRAY_SIZE];
8178         u8 *lut;
8179         struct i40e_hw *hw = &pf->hw;
8180         u32 reg_val;
8181         u64 hena;
8182         int ret;
8183
8184         /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8185         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8186                 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8187         hena |= i40e_pf_get_default_rss_hena(pf);
8188
8189         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8190         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8191
8192         /* Determine the RSS table size based on the hardware capabilities */
8193         reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8194         reg_val = (pf->rss_table_size == 512) ?
8195                         (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8196                         (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8197         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8198
8199         /* Determine the RSS size of the VSI */
8200         if (!vsi->rss_size)
8201                 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8202                                       vsi->num_queue_pairs);
8203         if (!vsi->rss_size)
8204                 return -EINVAL;
8205
8206         lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8207         if (!lut)
8208                 return -ENOMEM;
8209
8210         /* Use user configured lut if there is one, otherwise use default */
8211         if (vsi->rss_lut_user)
8212                 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8213         else
8214                 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8215
8216         /* Use user configured hash key if there is one, otherwise
8217          * use default.
8218          */
8219         if (vsi->rss_hkey_user)
8220                 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8221         else
8222                 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8223         ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8224         kfree(lut);
8225
8226         return ret;
8227 }
8228
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	/* clamp the allocation to what the hardware supports */
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	/* NOTE(review): the comparison below uses the unclamped
	 * queue_count, so a request above rss_size_max still triggers a
	 * full reset even though the allocation is clamped — confirm
	 * this is intended.
	 */
	if (queue_count != vsi->num_queue_pairs) {
		vsi->req_queue_pairs = queue_count;
		/* changing the queue count requires a full PF reset cycle */
		i40e_prep_for_reset(pf);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
8274
8275 /**
8276  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8277  * @pf: board private structure
8278  **/
8279 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8280 {
8281         i40e_status status;
8282         bool min_valid, max_valid;
8283         u32 max_bw, min_bw;
8284
8285         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8286                                            &min_valid, &max_valid);
8287
8288         if (!status) {
8289                 if (min_valid)
8290                         pf->npar_min_bw = min_bw;
8291                 if (max_valid)
8292                         pf->npar_max_bw = max_bw;
8293         }
8294
8295         return status;
8296 }
8297
8298 /**
8299  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8300  * @pf: board private structure
8301  **/
8302 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8303 {
8304         struct i40e_aqc_configure_partition_bw_data bw_data;
8305         i40e_status status;
8306
8307         /* Set the valid bit for this PF */
8308         bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8309         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8310         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8311
8312         /* Set the new bandwidths */
8313         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8314
8315         return status;
8316 }
8317
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Persists the temporary bandwidth settings from the shadow (alt) RAM
 * into the NVM by reading SW compatibility word 1 and writing it back
 * unchanged, which forces the firmware to flush the shadow RAM.  Only
 * supported when running on partition 1.
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
8400
8401 /**
8402  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8403  * @pf: board private structure to initialize
8404  *
8405  * i40e_sw_init initializes the Adapter private data structure.
8406  * Fields are initialized based on PCI device information and
8407  * OS network device settings (MTU size).
8408  **/
8409 static int i40e_sw_init(struct i40e_pf *pf)
8410 {
8411         int err = 0;
8412         int size;
8413
8414         /* Set default capability flags */
8415         pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8416                     I40E_FLAG_MSI_ENABLED     |
8417                     I40E_FLAG_MSIX_ENABLED;
8418
8419         /* Set default ITR */
8420         pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8421         pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8422
8423         /* Depending on PF configurations, it is possible that the RSS
8424          * maximum might end up larger than the available queues
8425          */
8426         pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8427         pf->alloc_rss_size = 1;
8428         pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8429         pf->rss_size_max = min_t(int, pf->rss_size_max,
8430                                  pf->hw.func_caps.num_tx_qp);
8431         if (pf->hw.func_caps.rss) {
8432                 pf->flags |= I40E_FLAG_RSS_ENABLED;
8433                 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8434                                            num_online_cpus());
8435         }
8436
8437         /* MFP mode enabled */
8438         if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8439                 pf->flags |= I40E_FLAG_MFP_ENABLED;
8440                 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8441                 if (i40e_get_npar_bw_setting(pf))
8442                         dev_warn(&pf->pdev->dev,
8443                                  "Could not get NPAR bw settings\n");
8444                 else
8445                         dev_info(&pf->pdev->dev,
8446                                  "Min BW = %8.8x, Max BW = %8.8x\n",
8447                                  pf->npar_min_bw, pf->npar_max_bw);
8448         }
8449
8450         /* FW/NVM is not yet fixed in this regard */
8451         if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8452             (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8453                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8454                 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8455                 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8456                     pf->hw.num_partitions > 1)
8457                         dev_info(&pf->pdev->dev,
8458                                  "Flow Director Sideband mode Disabled in MFP mode\n");
8459                 else
8460                         pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8461                 pf->fdir_pf_filter_count =
8462                                  pf->hw.func_caps.fd_filters_guaranteed;
8463                 pf->hw.fdir_shared_filter_count =
8464                                  pf->hw.func_caps.fd_filters_best_effort;
8465         }
8466
8467         if (i40e_is_mac_710(&pf->hw) &&
8468             (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8469             (pf->hw.aq.fw_maj_ver < 4))) {
8470                 pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8471                 /* No DCB support  for FW < v4.33 */
8472                 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8473         }
8474
8475         /* Disable FW LLDP if FW < v4.3 */
8476         if (i40e_is_mac_710(&pf->hw) &&
8477             (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8478             (pf->hw.aq.fw_maj_ver < 4)))
8479                 pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8480
8481         /* Use the FW Set LLDP MIB API if FW > v4.40 */
8482         if (i40e_is_mac_710(&pf->hw) &&
8483             (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8484             (pf->hw.aq.fw_maj_ver >= 5)))
8485                 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
8486
8487         if (pf->hw.func_caps.vmdq) {
8488                 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8489                 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8490                 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8491         }
8492
8493         if (pf->hw.func_caps.iwarp) {
8494                 pf->flags |= I40E_FLAG_IWARP_ENABLED;
8495                 /* IWARP needs one extra vector for CQP just like MISC.*/
8496                 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8497         }
8498
8499 #ifdef I40E_FCOE
8500         i40e_init_pf_fcoe(pf);
8501
8502 #endif /* I40E_FCOE */
8503 #ifdef CONFIG_PCI_IOV
8504         if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8505                 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8506                 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8507                 pf->num_req_vfs = min_t(int,
8508                                         pf->hw.func_caps.num_vfs,
8509                                         I40E_MAX_VF_COUNT);
8510         }
8511 #endif /* CONFIG_PCI_IOV */
8512         if (pf->hw.mac.type == I40E_MAC_X722) {
8513                 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8514                              I40E_FLAG_128_QP_RSS_CAPABLE |
8515                              I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8516                              I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8517                              I40E_FLAG_WB_ON_ITR_CAPABLE |
8518                              I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8519                              I40E_FLAG_NO_PCI_LINK_CHECK |
8520                              I40E_FLAG_USE_SET_LLDP_MIB |
8521                              I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8522         } else if ((pf->hw.aq.api_maj_ver > 1) ||
8523                    ((pf->hw.aq.api_maj_ver == 1) &&
8524                     (pf->hw.aq.api_min_ver > 4))) {
8525                 /* Supported in FW API version higher than 1.4 */
8526                 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8527                 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8528         } else {
8529                 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8530         }
8531
8532         pf->eeprom_version = 0xDEAD;
8533         pf->lan_veb = I40E_NO_VEB;
8534         pf->lan_vsi = I40E_NO_VSI;
8535
8536         /* By default FW has this off for performance reasons */
8537         pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8538
8539         /* set up queue assignment tracking */
8540         size = sizeof(struct i40e_lump_tracking)
8541                 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8542         pf->qp_pile = kzalloc(size, GFP_KERNEL);
8543         if (!pf->qp_pile) {
8544                 err = -ENOMEM;
8545                 goto sw_init_done;
8546         }
8547         pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8548         pf->qp_pile->search_hint = 0;
8549
8550         pf->tx_timeout_recovery_level = 1;
8551
8552         mutex_init(&pf->switch_mutex);
8553
8554         /* If NPAR is enabled nudge the Tx scheduler */
8555         if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8556                 i40e_set_npar_bw_setting(pf);
8557
8558 sw_init_done:
8559         return err;
8560 }
8561
8562 /**
8563  * i40e_set_ntuple - set the ntuple feature flag and take action
8564  * @pf: board private structure to initialize
8565  * @features: the feature set that the stack is suggesting
8566  *
8567  * returns a bool to indicate if reset needs to happen
8568  **/
8569 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8570 {
8571         bool need_reset = false;
8572
8573         /* Check if Flow Director n-tuple support was enabled or disabled.  If
8574          * the state changed, we need to reset.
8575          */
8576         if (features & NETIF_F_NTUPLE) {
8577                 /* Enable filters and mark for reset */
8578                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8579                         need_reset = true;
8580                 /* enable FD_SB only if there is MSI-X vector */
8581                 if (pf->num_fdsb_msix > 0)
8582                         pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8583         } else {
8584                 /* turn off filters, mark for reset and clear SW filter list */
8585                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8586                         need_reset = true;
8587                         i40e_fdir_filter_exit(pf);
8588                 }
8589                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8590                 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8591                 /* reset fd counters */
8592                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8593                 pf->fdir_pf_active_filters = 0;
8594                 /* if ATR was auto disabled it can be re-enabled. */
8595                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8596                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8597                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8598                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
8599                                 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8600                 }
8601         }
8602         return need_reset;
8603 }
8604
8605 /**
8606  * i40e_clear_rss_lut - clear the rx hash lookup table
8607  * @vsi: the VSI being configured
8608  **/
8609 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8610 {
8611         struct i40e_pf *pf = vsi->back;
8612         struct i40e_hw *hw = &pf->hw;
8613         u16 vf_id = vsi->vf_id;
8614         u8 i;
8615
8616         if (vsi->type == I40E_VSI_MAIN) {
8617                 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8618                         wr32(hw, I40E_PFQF_HLUT(i), 0);
8619         } else if (vsi->type == I40E_VSI_SRIOV) {
8620                 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8621                         i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8622         } else {
8623                 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8624         }
8625 }
8626
8627 /**
8628  * i40e_set_features - set the netdev feature flags
8629  * @netdev: ptr to the netdev being adjusted
8630  * @features: the feature set that the stack is suggesting
8631  **/
8632 static int i40e_set_features(struct net_device *netdev,
8633                              netdev_features_t features)
8634 {
8635         struct i40e_netdev_priv *np = netdev_priv(netdev);
8636         struct i40e_vsi *vsi = np->vsi;
8637         struct i40e_pf *pf = vsi->back;
8638         bool need_reset;
8639
8640         if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8641                 i40e_pf_config_rss(pf);
8642         else if (!(features & NETIF_F_RXHASH) &&
8643                  netdev->features & NETIF_F_RXHASH)
8644                 i40e_clear_rss_lut(vsi);
8645
8646         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8647                 i40e_vlan_stripping_enable(vsi);
8648         else
8649                 i40e_vlan_stripping_disable(vsi);
8650
8651         need_reset = i40e_set_ntuple(pf, features);
8652
8653         if (need_reset)
8654                 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8655
8656         return 0;
8657 }
8658
8659 /**
8660  * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
8661  * @pf: board private structure
8662  * @port: The UDP port to look up
8663  *
8664  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8665  **/
8666 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8667 {
8668         u8 i;
8669
8670         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8671                 if (pf->udp_ports[i].index == port)
8672                         return i;
8673         }
8674
8675         return i;
8676 }
8677
8678 /**
8679  * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8680  * @netdev: This physical port's netdev
8681  * @ti: Tunnel endpoint information
8682  **/
8683 static void i40e_udp_tunnel_add(struct net_device *netdev,
8684                                 struct udp_tunnel_info *ti)
8685 {
8686         struct i40e_netdev_priv *np = netdev_priv(netdev);
8687         struct i40e_vsi *vsi = np->vsi;
8688         struct i40e_pf *pf = vsi->back;
8689         __be16 port = ti->port;
8690         u8 next_idx;
8691         u8 idx;
8692
8693         idx = i40e_get_udp_port_idx(pf, port);
8694
8695         /* Check if port already exists */
8696         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8697                 netdev_info(netdev, "port %d already offloaded\n",
8698                             ntohs(port));
8699                 return;
8700         }
8701
8702         /* Now check if there is space to add the new port */
8703         next_idx = i40e_get_udp_port_idx(pf, 0);
8704
8705         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8706                 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8707                             ntohs(port));
8708                 return;
8709         }
8710
8711         switch (ti->type) {
8712         case UDP_TUNNEL_TYPE_VXLAN:
8713                 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8714                 break;
8715         case UDP_TUNNEL_TYPE_GENEVE:
8716                 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8717                         return;
8718                 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8719                 break;
8720         default:
8721                 return;
8722         }
8723
8724         /* New port: add it and mark its index in the bitmap */
8725         pf->udp_ports[next_idx].index = port;
8726         pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8727         pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8728 }
8729
8730 /**
8731  * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8732  * @netdev: This physical port's netdev
8733  * @ti: Tunnel endpoint information
8734  **/
8735 static void i40e_udp_tunnel_del(struct net_device *netdev,
8736                                 struct udp_tunnel_info *ti)
8737 {
8738         struct i40e_netdev_priv *np = netdev_priv(netdev);
8739         struct i40e_vsi *vsi = np->vsi;
8740         struct i40e_pf *pf = vsi->back;
8741         __be16 port = ti->port;
8742         u8 idx;
8743
8744         idx = i40e_get_udp_port_idx(pf, port);
8745
8746         /* Check if port already exists */
8747         if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8748                 goto not_found;
8749
8750         switch (ti->type) {
8751         case UDP_TUNNEL_TYPE_VXLAN:
8752                 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8753                         goto not_found;
8754                 break;
8755         case UDP_TUNNEL_TYPE_GENEVE:
8756                 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8757                         goto not_found;
8758                 break;
8759         default:
8760                 goto not_found;
8761         }
8762
8763         /* if port exists, set it to 0 (mark for deletion)
8764          * and make it pending
8765          */
8766         pf->udp_ports[idx].index = 0;
8767         pf->pending_udp_bitmap |= BIT_ULL(idx);
8768         pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8769
8770         return;
8771 not_found:
8772         netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8773                     ntohs(port));
8774 }
8775
8776 static int i40e_get_phys_port_id(struct net_device *netdev,
8777                                  struct netdev_phys_item_id *ppid)
8778 {
8779         struct i40e_netdev_priv *np = netdev_priv(netdev);
8780         struct i40e_pf *pf = np->vsi->back;
8781         struct i40e_hw *hw = &pf->hw;
8782
8783         if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8784                 return -EOPNOTSUPP;
8785
8786         ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8787         memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8788
8789         return 0;
8790 }
8791
8792 /**
8793  * i40e_ndo_fdb_add - add an entry to the hardware database
8794  * @ndm: the input from the stack
8795  * @tb: pointer to array of nladdr (unused)
8796  * @dev: the net device pointer
8797  * @addr: the MAC address entry being added
8798  * @flags: instructions from stack about fdb operation
8799  */
8800 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8801                             struct net_device *dev,
8802                             const unsigned char *addr, u16 vid,
8803                             u16 flags)
8804 {
8805         struct i40e_netdev_priv *np = netdev_priv(dev);
8806         struct i40e_pf *pf = np->vsi->back;
8807         int err = 0;
8808
8809         if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8810                 return -EOPNOTSUPP;
8811
8812         if (vid) {
8813                 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8814                 return -EINVAL;
8815         }
8816
8817         /* Hardware does not support aging addresses so if a
8818          * ndm_state is given only allow permanent addresses
8819          */
8820         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8821                 netdev_info(dev, "FDB only supports static addresses\n");
8822                 return -EINVAL;
8823         }
8824
8825         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8826                 err = dev_uc_add_excl(dev, addr);
8827         else if (is_multicast_ether_addr(addr))
8828                 err = dev_mc_add_excl(dev, addr);
8829         else
8830                 err = -EINVAL;
8831
8832         /* Only return duplicate errors if NLM_F_EXCL is set */
8833         if (err == -EEXIST && !(flags & NLM_F_EXCL))
8834                 err = 0;
8835
8836         return err;
8837 }
8838
8839 /**
8840  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8841  * @dev: the netdev being configured
8842  * @nlh: RTNL message
8843  *
8844  * Inserts a new hardware bridge if not already created and
8845  * enables the bridging mode requested (VEB or VEPA). If the
8846  * hardware bridge has already been inserted and the request
8847  * is to change the mode then that requires a PF reset to
8848  * allow rebuild of the components with required hardware
8849  * bridge mode enabled.
8850  **/
8851 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8852                                    struct nlmsghdr *nlh,
8853                                    u16 flags)
8854 {
8855         struct i40e_netdev_priv *np = netdev_priv(dev);
8856         struct i40e_vsi *vsi = np->vsi;
8857         struct i40e_pf *pf = vsi->back;
8858         struct i40e_veb *veb = NULL;
8859         struct nlattr *attr, *br_spec;
8860         int i, rem;
8861
8862         /* Only for PF VSI for now */
8863         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8864                 return -EOPNOTSUPP;
8865
8866         /* Find the HW bridge for PF VSI */
8867         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8868                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8869                         veb = pf->veb[i];
8870         }
8871
8872         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8873
8874         nla_for_each_nested(attr, br_spec, rem) {
8875                 __u16 mode;
8876
8877                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8878                         continue;
8879
8880                 mode = nla_get_u16(attr);
8881                 if ((mode != BRIDGE_MODE_VEPA) &&
8882                     (mode != BRIDGE_MODE_VEB))
8883                         return -EINVAL;
8884
8885                 /* Insert a new HW bridge */
8886                 if (!veb) {
8887                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8888                                              vsi->tc_config.enabled_tc);
8889                         if (veb) {
8890                                 veb->bridge_mode = mode;
8891                                 i40e_config_bridge_mode(veb);
8892                         } else {
8893                                 /* No Bridge HW offload available */
8894                                 return -ENOENT;
8895                         }
8896                         break;
8897                 } else if (mode != veb->bridge_mode) {
8898                         /* Existing HW bridge but different mode needs reset */
8899                         veb->bridge_mode = mode;
8900                         /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8901                         if (mode == BRIDGE_MODE_VEB)
8902                                 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8903                         else
8904                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8905                         i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8906                         break;
8907                 }
8908         }
8909
8910         return 0;
8911 }
8912
8913 /**
8914  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8915  * @skb: skb buff
8916  * @pid: process id
8917  * @seq: RTNL message seq #
8918  * @dev: the netdev being configured
8919  * @filter_mask: unused
8920  * @nlflags: netlink flags passed in
8921  *
8922  * Return the mode in which the hardware bridge is operating in
8923  * i.e VEB or VEPA.
8924  **/
8925 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8926                                    struct net_device *dev,
8927                                    u32 __always_unused filter_mask,
8928                                    int nlflags)
8929 {
8930         struct i40e_netdev_priv *np = netdev_priv(dev);
8931         struct i40e_vsi *vsi = np->vsi;
8932         struct i40e_pf *pf = vsi->back;
8933         struct i40e_veb *veb = NULL;
8934         int i;
8935
8936         /* Only for PF VSI for now */
8937         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8938                 return -EOPNOTSUPP;
8939
8940         /* Find the HW bridge for the PF VSI */
8941         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8942                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8943                         veb = pf->veb[i];
8944         }
8945
8946         if (!veb)
8947                 return 0;
8948
8949         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8950                                        0, 0, nlflags, filter_mask, NULL);
8951 }
8952
8953 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
8954  * inner mac plus all inner ethertypes.
8955  */
8956 #define I40E_MAX_TUNNEL_HDR_LEN 128
8957 /**
8958  * i40e_features_check - Validate encapsulated packet conforms to limits
8959  * @skb: skb buff
8960  * @dev: This physical port's netdev
8961  * @features: Offload features that the stack believes apply
8962  **/
8963 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8964                                              struct net_device *dev,
8965                                              netdev_features_t features)
8966 {
8967         if (skb->encapsulation &&
8968             ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
8969              I40E_MAX_TUNNEL_HDR_LEN))
8970                 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8971
8972         return features;
8973 }
8974
/* net_device_ops for netdevs backed by i40e VSIs; wired up in
 * i40e_config_netdev()
 */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	/* SR-IOV VF management callbacks */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	/* UDP tunnel (VXLAN/GENEVE) port offload notifications */
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
};
9012
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the netdev for the VSI, programs its feature flags and MAC
 * filter, and hooks up the i40e netdev/ethtool ops.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	/* the netdev private area only holds a backpointer to the VSI */
	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* features usable on encapsulated (tunneled) packets */
	netdev->hw_enc_features |= NETIF_F_SG			|
				   NETIF_F_IP_CSUM		|
				   NETIF_F_IPV6_CSUM		|
				   NETIF_F_HIGHDMA		|
				   NETIF_F_SOFT_FEATURES	|
				   NETIF_F_TSO			|
				   NETIF_F_TSO_ECN		|
				   NETIF_F_TSO6			|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   NETIF_F_SCTP_CRC		|
				   NETIF_F_RXHASH		|
				   NETIF_F_RXCSUM		|
				   0;

	/* without outer UDP checksum support, tunnel csum is only
	 * available via GSO partial
	 */
	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	/* record features VLANs can make use of */
	netdev->vlan_features |= netdev->hw_enc_features |
				 NETIF_F_TSO_MANGLEID;

	/* ntuple (Flow Director sideband) is not offered in MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE;

	netdev->hw_features |= netdev->hw_enc_features	|
			       NETIF_F_HW_VLAN_CTAG_TX	|
			       NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		/* main VSI uses the permanent hardware MAC */
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return 0;
}
9115
9116 /**
9117  * i40e_vsi_delete - Delete a VSI from the switch
9118  * @vsi: the VSI being removed
9119  *
9120  * Returns 0 on success, negative value on failure
9121  **/
9122 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9123 {
9124         /* remove default VSI is not allowed */
9125         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9126                 return;
9127
9128         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9129 }
9130
9131 /**
9132  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9133  * @vsi: the VSI being queried
9134  *
9135  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
9136  **/
9137 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9138 {
9139         struct i40e_veb *veb;
9140         struct i40e_pf *pf = vsi->back;
9141
9142         /* Uplink is not a bridge so default to VEB */
9143         if (vsi->veb_idx == I40E_NO_VEB)
9144                 return 1;
9145
9146         veb = pf->veb[vsi->veb_idx];
9147         if (!veb) {
9148                 dev_info(&pf->pdev->dev,
9149                          "There is no veb associated with the bridge\n");
9150                 return -ENOENT;
9151         }
9152
9153         /* Uplink is a bridge in VEPA mode */
9154         if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9155                 return 0;
9156         } else {
9157                 /* Uplink is a bridge in VEB mode */
9158                 return 1;
9159         }
9160
9161         /* VEPA is now default bridge, so return 0 */
9162         return 0;
9163 }
9164
9165 /**
9166  * i40e_add_vsi - Add a VSI to the switch
9167  * @vsi: the VSI being configured
9168  *
9169  * This initializes a VSI context depending on the VSI type to be added and
9170  * passes it down to the add_vsi aq command.
9171  **/
9172 static int i40e_add_vsi(struct i40e_vsi *vsi)
9173 {
9174         int ret = -ENODEV;
9175         i40e_status aq_ret = 0;
9176         struct i40e_pf *pf = vsi->back;
9177         struct i40e_hw *hw = &pf->hw;
9178         struct i40e_vsi_context ctxt;
9179         struct i40e_mac_filter *f, *ftmp;
9180
9181         u8 enabled_tc = 0x1; /* TC0 enabled */
9182         int f_count = 0;
9183
9184         memset(&ctxt, 0, sizeof(ctxt));
9185         switch (vsi->type) {
9186         case I40E_VSI_MAIN:
9187                 /* The PF's main VSI is already setup as part of the
9188                  * device initialization, so we'll not bother with
9189                  * the add_vsi call, but we will retrieve the current
9190                  * VSI context.
9191                  */
9192                 ctxt.seid = pf->main_vsi_seid;
9193                 ctxt.pf_num = pf->hw.pf_id;
9194                 ctxt.vf_num = 0;
9195                 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9196                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9197                 if (ret) {
9198                         dev_info(&pf->pdev->dev,
9199                                  "couldn't get PF vsi config, err %s aq_err %s\n",
9200                                  i40e_stat_str(&pf->hw, ret),
9201                                  i40e_aq_str(&pf->hw,
9202                                              pf->hw.aq.asq_last_status));
9203                         return -ENOENT;
9204                 }
9205                 vsi->info = ctxt.info;
9206                 vsi->info.valid_sections = 0;
9207
9208                 vsi->seid = ctxt.seid;
9209                 vsi->id = ctxt.vsi_number;
9210
9211                 enabled_tc = i40e_pf_get_tc_map(pf);
9212
9213                 /* MFP mode setup queue map and update VSI */
9214                 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9215                     !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9216                         memset(&ctxt, 0, sizeof(ctxt));
9217                         ctxt.seid = pf->main_vsi_seid;
9218                         ctxt.pf_num = pf->hw.pf_id;
9219                         ctxt.vf_num = 0;
9220                         i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9221                         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9222                         if (ret) {
9223                                 dev_info(&pf->pdev->dev,
9224                                          "update vsi failed, err %s aq_err %s\n",
9225                                          i40e_stat_str(&pf->hw, ret),
9226                                          i40e_aq_str(&pf->hw,
9227                                                     pf->hw.aq.asq_last_status));
9228                                 ret = -ENOENT;
9229                                 goto err;
9230                         }
9231                         /* update the local VSI info queue map */
9232                         i40e_vsi_update_queue_map(vsi, &ctxt);
9233                         vsi->info.valid_sections = 0;
9234                 } else {
9235                         /* Default/Main VSI is only enabled for TC0
9236                          * reconfigure it to enable all TCs that are
9237                          * available on the port in SFP mode.
9238                          * For MFP case the iSCSI PF would use this
9239                          * flow to enable LAN+iSCSI TC.
9240                          */
9241                         ret = i40e_vsi_config_tc(vsi, enabled_tc);
9242                         if (ret) {
9243                                 dev_info(&pf->pdev->dev,
9244                                          "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9245                                          enabled_tc,
9246                                          i40e_stat_str(&pf->hw, ret),
9247                                          i40e_aq_str(&pf->hw,
9248                                                     pf->hw.aq.asq_last_status));
9249                                 ret = -ENOENT;
9250                         }
9251                 }
9252                 break;
9253
9254         case I40E_VSI_FDIR:
9255                 ctxt.pf_num = hw->pf_id;
9256                 ctxt.vf_num = 0;
9257                 ctxt.uplink_seid = vsi->uplink_seid;
9258                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9259                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9260                 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9261                     (i40e_is_vsi_uplink_mode_veb(vsi))) {
9262                         ctxt.info.valid_sections |=
9263                              cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9264                         ctxt.info.switch_id =
9265                            cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9266                 }
9267                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9268                 break;
9269
9270         case I40E_VSI_VMDQ2:
9271                 ctxt.pf_num = hw->pf_id;
9272                 ctxt.vf_num = 0;
9273                 ctxt.uplink_seid = vsi->uplink_seid;
9274                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9275                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9276
9277                 /* This VSI is connected to VEB so the switch_id
9278                  * should be set to zero by default.
9279                  */
9280                 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9281                         ctxt.info.valid_sections |=
9282                                 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9283                         ctxt.info.switch_id =
9284                                 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9285                 }
9286
9287                 /* Setup the VSI tx/rx queue map for TC0 only for now */
9288                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9289                 break;
9290
9291         case I40E_VSI_SRIOV:
9292                 ctxt.pf_num = hw->pf_id;
9293                 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9294                 ctxt.uplink_seid = vsi->uplink_seid;
9295                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9296                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9297
9298                 /* This VSI is connected to VEB so the switch_id
9299                  * should be set to zero by default.
9300                  */
9301                 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9302                         ctxt.info.valid_sections |=
9303                                 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9304                         ctxt.info.switch_id =
9305                                 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9306                 }
9307
9308                 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9309                         ctxt.info.valid_sections |=
9310                                 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9311                         ctxt.info.queueing_opt_flags |=
9312                                 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9313                                  I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9314                 }
9315
9316                 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9317                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9318                 if (pf->vf[vsi->vf_id].spoofchk) {
9319                         ctxt.info.valid_sections |=
9320                                 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9321                         ctxt.info.sec_flags |=
9322                                 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9323                                  I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9324                 }
9325                 /* Setup the VSI tx/rx queue map for TC0 only for now */
9326                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9327                 break;
9328
9329 #ifdef I40E_FCOE
9330         case I40E_VSI_FCOE:
9331                 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9332                 if (ret) {
9333                         dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9334                         return ret;
9335                 }
9336                 break;
9337
9338 #endif /* I40E_FCOE */
9339         case I40E_VSI_IWARP:
9340                 /* send down message to iWARP */
9341                 break;
9342
9343         default:
9344                 return -ENODEV;
9345         }
9346
9347         if (vsi->type != I40E_VSI_MAIN) {
9348                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9349                 if (ret) {
9350                         dev_info(&vsi->back->pdev->dev,
9351                                  "add vsi failed, err %s aq_err %s\n",
9352                                  i40e_stat_str(&pf->hw, ret),
9353                                  i40e_aq_str(&pf->hw,
9354                                              pf->hw.aq.asq_last_status));
9355                         ret = -ENOENT;
9356                         goto err;
9357                 }
9358                 vsi->info = ctxt.info;
9359                 vsi->info.valid_sections = 0;
9360                 vsi->seid = ctxt.seid;
9361                 vsi->id = ctxt.vsi_number;
9362         }
9363         /* Except FDIR VSI, for all other VSIs set the broadcast filter */
9364         if (vsi->type != I40E_VSI_FDIR) {
9365                 aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9366                 if (aq_ret) {
9367                         ret = i40e_aq_rc_to_posix(aq_ret,
9368                                                   hw->aq.asq_last_status);
9369                         dev_info(&pf->pdev->dev,
9370                                  "set brdcast promisc failed, err %s, aq_err %s\n",
9371                                  i40e_stat_str(hw, aq_ret),
9372                                  i40e_aq_str(hw, hw->aq.asq_last_status));
9373                 }
9374         }
9375
9376         vsi->active_filters = 0;
9377         clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
9378         spin_lock_bh(&vsi->mac_filter_list_lock);
9379         /* If macvlan filters already exist, force them to get loaded */
9380         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
9381                 f->state = I40E_FILTER_NEW;
9382                 f_count++;
9383         }
9384         spin_unlock_bh(&vsi->mac_filter_list_lock);
9385
9386         if (f_count) {
9387                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9388                 pf->flags |= I40E_FLAG_FILTER_SYNC;
9389         }
9390
9391         /* Update VSI BW information */
9392         ret = i40e_vsi_get_bw_info(vsi);
9393         if (ret) {
9394                 dev_info(&pf->pdev->dev,
9395                          "couldn't get vsi bw info, err %s aq_err %s\n",
9396                          i40e_stat_str(&pf->hw, ret),
9397                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9398                 /* VSI is already added so not tearing that up */
9399                 ret = 0;
9400         }
9401
9402 err:
9403         return ret;
9404 }
9405
9406 /**
9407  * i40e_vsi_release - Delete a VSI and free its resources
9408  * @vsi: the VSI being removed
9409  *
9410  * Returns 0 on success or < 0 on error
9411  **/
9412 int i40e_vsi_release(struct i40e_vsi *vsi)
9413 {
9414         struct i40e_mac_filter *f, *ftmp;
9415         struct i40e_veb *veb = NULL;
9416         struct i40e_pf *pf;
9417         u16 uplink_seid;
9418         int i, n;
9419
9420         pf = vsi->back;
9421
9422         /* release of a VEB-owner or last VSI is not allowed */
9423         if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9424                 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9425                          vsi->seid, vsi->uplink_seid);
9426                 return -ENODEV;
9427         }
9428         if (vsi == pf->vsi[pf->lan_vsi] &&
9429             !test_bit(__I40E_DOWN, &pf->state)) {
9430                 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9431                 return -ENODEV;
9432         }
9433
9434         uplink_seid = vsi->uplink_seid;
9435         if (vsi->type != I40E_VSI_SRIOV) {
9436                 if (vsi->netdev_registered) {
9437                         vsi->netdev_registered = false;
9438                         if (vsi->netdev) {
9439                                 /* results in a call to i40e_close() */
9440                                 unregister_netdev(vsi->netdev);
9441                         }
9442                 } else {
9443                         i40e_vsi_close(vsi);
9444                 }
9445                 i40e_vsi_disable_irq(vsi);
9446         }
9447
9448         spin_lock_bh(&vsi->mac_filter_list_lock);
9449
9450         /* clear the sync flag on all filters */
9451         if (vsi->netdev) {
9452                 __dev_uc_unsync(vsi->netdev, NULL);
9453                 __dev_mc_unsync(vsi->netdev, NULL);
9454         }
9455
9456         /* make sure any remaining filters are marked for deletion */
9457         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
9458                 i40e_del_filter(vsi, f->macaddr, f->vlan);
9459
9460         spin_unlock_bh(&vsi->mac_filter_list_lock);
9461
9462         i40e_sync_vsi_filters(vsi);
9463
9464         i40e_vsi_delete(vsi);
9465         i40e_vsi_free_q_vectors(vsi);
9466         if (vsi->netdev) {
9467                 free_netdev(vsi->netdev);
9468                 vsi->netdev = NULL;
9469         }
9470         i40e_vsi_clear_rings(vsi);
9471         i40e_vsi_clear(vsi);
9472
9473         /* If this was the last thing on the VEB, except for the
9474          * controlling VSI, remove the VEB, which puts the controlling
9475          * VSI onto the next level down in the switch.
9476          *
9477          * Well, okay, there's one more exception here: don't remove
9478          * the orphan VEBs yet.  We'll wait for an explicit remove request
9479          * from up the network stack.
9480          */
9481         for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9482                 if (pf->vsi[i] &&
9483                     pf->vsi[i]->uplink_seid == uplink_seid &&
9484                     (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9485                         n++;      /* count the VSIs */
9486                 }
9487         }
9488         for (i = 0; i < I40E_MAX_VEB; i++) {
9489                 if (!pf->veb[i])
9490                         continue;
9491                 if (pf->veb[i]->uplink_seid == uplink_seid)
9492                         n++;     /* count the VEBs */
9493                 if (pf->veb[i]->seid == uplink_seid)
9494                         veb = pf->veb[i];
9495         }
9496         if (n == 0 && veb && veb->uplink_seid != 0)
9497                 i40e_veb_release(veb);
9498
9499         return 0;
9500 }
9501
9502 /**
9503  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9504  * @vsi: ptr to the VSI
9505  *
9506  * This should only be called after i40e_vsi_mem_alloc() which allocates the
9507  * corresponding SW VSI structure and initializes num_queue_pairs for the
9508  * newly allocated VSI.
9509  *
9510  * Returns 0 on success or negative on failure
9511  **/
9512 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9513 {
9514         int ret = -ENOENT;
9515         struct i40e_pf *pf = vsi->back;
9516
9517         if (vsi->q_vectors[0]) {
9518                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9519                          vsi->seid);
9520                 return -EEXIST;
9521         }
9522
9523         if (vsi->base_vector) {
9524                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9525                          vsi->seid, vsi->base_vector);
9526                 return -EEXIST;
9527         }
9528
9529         ret = i40e_vsi_alloc_q_vectors(vsi);
9530         if (ret) {
9531                 dev_info(&pf->pdev->dev,
9532                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9533                          vsi->num_q_vectors, vsi->seid, ret);
9534                 vsi->num_q_vectors = 0;
9535                 goto vector_setup_out;
9536         }
9537
9538         /* In Legacy mode, we do not have to get any other vector since we
9539          * piggyback on the misc/ICR0 for queue interrupts.
9540         */
9541         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9542                 return ret;
9543         if (vsi->num_q_vectors)
9544                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9545                                                  vsi->num_q_vectors, vsi->idx);
9546         if (vsi->base_vector < 0) {
9547                 dev_info(&pf->pdev->dev,
9548                          "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9549                          vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9550                 i40e_vsi_free_q_vectors(vsi);
9551                 ret = -ENOENT;
9552                 goto vector_setup_out;
9553         }
9554
9555 vector_setup_out:
9556         return ret;
9557 }
9558
9559 /**
9560  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
9561  * @vsi: pointer to the vsi.
9562  *
9563  * This re-allocates a vsi's queue resources.
9564  *
9565  * Returns pointer to the successfully allocated and configured VSI sw struct
9566  * on success, otherwise returns NULL on failure.
9567  **/
9568 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9569 {
9570         struct i40e_pf *pf;
9571         u8 enabled_tc;
9572         int ret;
9573
9574         if (!vsi)
9575                 return NULL;
9576
9577         pf = vsi->back;
9578
9579         i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9580         i40e_vsi_clear_rings(vsi);
9581
9582         i40e_vsi_free_arrays(vsi, false);
9583         i40e_set_num_rings_in_vsi(vsi);
9584         ret = i40e_vsi_alloc_arrays(vsi, false);
9585         if (ret)
9586                 goto err_vsi;
9587
9588         ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9589         if (ret < 0) {
9590                 dev_info(&pf->pdev->dev,
9591                          "failed to get tracking for %d queues for VSI %d err %d\n",
9592                          vsi->alloc_queue_pairs, vsi->seid, ret);
9593                 goto err_vsi;
9594         }
9595         vsi->base_queue = ret;
9596
9597         /* Update the FW view of the VSI. Force a reset of TC and queue
9598          * layout configurations.
9599          */
9600         enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9601         pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9602         pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9603         i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9604
9605         /* assign it some queues */
9606         ret = i40e_alloc_rings(vsi);
9607         if (ret)
9608                 goto err_rings;
9609
9610         /* map all of the rings to the q_vectors */
9611         i40e_vsi_map_rings_to_vectors(vsi);
9612         return vsi;
9613
9614 err_rings:
9615         i40e_vsi_free_q_vectors(vsi);
9616         if (vsi->netdev_registered) {
9617                 vsi->netdev_registered = false;
9618                 unregister_netdev(vsi->netdev);
9619                 free_netdev(vsi->netdev);
9620                 vsi->netdev = NULL;
9621         }
9622         i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9623 err_vsi:
9624         i40e_vsi_clear(vsi);
9625         return NULL;
9626 }
9627
9628 /**
9629  * i40e_vsi_setup - Set up a VSI by a given type
9630  * @pf: board private structure
9631  * @type: VSI type
9632  * @uplink_seid: the switch element to link to
9633  * @param1: usage depends upon VSI type. For VF types, indicates VF id
9634  *
9635  * This allocates the sw VSI structure and its queue resources, then add a VSI
9636  * to the identified VEB.
9637  *
9638  * Returns pointer to the successfully allocated and configure VSI sw struct on
9639  * success, otherwise returns NULL on failure.
9640  **/
9641 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9642                                 u16 uplink_seid, u32 param1)
9643 {
9644         struct i40e_vsi *vsi = NULL;
9645         struct i40e_veb *veb = NULL;
9646         int ret, i;
9647         int v_idx;
9648
9649         /* The requested uplink_seid must be either
9650          *     - the PF's port seid
9651          *              no VEB is needed because this is the PF
9652          *              or this is a Flow Director special case VSI
9653          *     - seid of an existing VEB
9654          *     - seid of a VSI that owns an existing VEB
9655          *     - seid of a VSI that doesn't own a VEB
9656          *              a new VEB is created and the VSI becomes the owner
9657          *     - seid of the PF VSI, which is what creates the first VEB
9658          *              this is a special case of the previous
9659          *
9660          * Find which uplink_seid we were given and create a new VEB if needed
9661          */
9662         for (i = 0; i < I40E_MAX_VEB; i++) {
9663                 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9664                         veb = pf->veb[i];
9665                         break;
9666                 }
9667         }
9668
9669         if (!veb && uplink_seid != pf->mac_seid) {
9670
9671                 for (i = 0; i < pf->num_alloc_vsi; i++) {
9672                         if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9673                                 vsi = pf->vsi[i];
9674                                 break;
9675                         }
9676                 }
9677                 if (!vsi) {
9678                         dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9679                                  uplink_seid);
9680                         return NULL;
9681                 }
9682
9683                 if (vsi->uplink_seid == pf->mac_seid)
9684                         veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9685                                              vsi->tc_config.enabled_tc);
9686                 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9687                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9688                                              vsi->tc_config.enabled_tc);
9689                 if (veb) {
9690                         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9691                                 dev_info(&vsi->back->pdev->dev,
9692                                          "New VSI creation error, uplink seid of LAN VSI expected.\n");
9693                                 return NULL;
9694                         }
9695                         /* We come up by default in VEPA mode if SRIOV is not
9696                          * already enabled, in which case we can't force VEPA
9697                          * mode.
9698                          */
9699                         if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9700                                 veb->bridge_mode = BRIDGE_MODE_VEPA;
9701                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9702                         }
9703                         i40e_config_bridge_mode(veb);
9704                 }
9705                 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9706                         if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9707                                 veb = pf->veb[i];
9708                 }
9709                 if (!veb) {
9710                         dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9711                         return NULL;
9712                 }
9713
9714                 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9715                 uplink_seid = veb->seid;
9716         }
9717
9718         /* get vsi sw struct */
9719         v_idx = i40e_vsi_mem_alloc(pf, type);
9720         if (v_idx < 0)
9721                 goto err_alloc;
9722         vsi = pf->vsi[v_idx];
9723         if (!vsi)
9724                 goto err_alloc;
9725         vsi->type = type;
9726         vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9727
9728         if (type == I40E_VSI_MAIN)
9729                 pf->lan_vsi = v_idx;
9730         else if (type == I40E_VSI_SRIOV)
9731                 vsi->vf_id = param1;
9732         /* assign it some queues */
9733         ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9734                                 vsi->idx);
9735         if (ret < 0) {
9736                 dev_info(&pf->pdev->dev,
9737                          "failed to get tracking for %d queues for VSI %d err=%d\n",
9738                          vsi->alloc_queue_pairs, vsi->seid, ret);
9739                 goto err_vsi;
9740         }
9741         vsi->base_queue = ret;
9742
9743         /* get a VSI from the hardware */
9744         vsi->uplink_seid = uplink_seid;
9745         ret = i40e_add_vsi(vsi);
9746         if (ret)
9747                 goto err_vsi;
9748
9749         switch (vsi->type) {
9750         /* setup the netdev if needed */
9751         case I40E_VSI_MAIN:
9752                 /* Apply relevant filters if a platform-specific mac
9753                  * address was selected.
9754                  */
9755                 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
9756                         ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9757                         if (ret) {
9758                                 dev_warn(&pf->pdev->dev,
9759                                          "could not set up macaddr; err %d\n",
9760                                          ret);
9761                         }
9762                 }
9763         case I40E_VSI_VMDQ2:
9764         case I40E_VSI_FCOE:
9765                 ret = i40e_config_netdev(vsi);
9766                 if (ret)
9767                         goto err_netdev;
9768                 ret = register_netdev(vsi->netdev);
9769                 if (ret)
9770                         goto err_netdev;
9771                 vsi->netdev_registered = true;
9772                 netif_carrier_off(vsi->netdev);
9773 #ifdef CONFIG_I40E_DCB
9774                 /* Setup DCB netlink interface */
9775                 i40e_dcbnl_setup(vsi);
9776 #endif /* CONFIG_I40E_DCB */
9777                 /* fall through */
9778
9779         case I40E_VSI_FDIR:
9780                 /* set up vectors and rings if needed */
9781                 ret = i40e_vsi_setup_vectors(vsi);
9782                 if (ret)
9783                         goto err_msix;
9784
9785                 ret = i40e_alloc_rings(vsi);
9786                 if (ret)
9787                         goto err_rings;
9788
9789                 /* map all of the rings to the q_vectors */
9790                 i40e_vsi_map_rings_to_vectors(vsi);
9791
9792                 i40e_vsi_reset_stats(vsi);
9793                 break;
9794
9795         default:
9796                 /* no netdev or rings for the other VSI types */
9797                 break;
9798         }
9799
9800         if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9801             (vsi->type == I40E_VSI_VMDQ2)) {
9802                 ret = i40e_vsi_config_rss(vsi);
9803         }
9804         return vsi;
9805
9806 err_rings:
9807         i40e_vsi_free_q_vectors(vsi);
9808 err_msix:
9809         if (vsi->netdev_registered) {
9810                 vsi->netdev_registered = false;
9811                 unregister_netdev(vsi->netdev);
9812                 free_netdev(vsi->netdev);
9813                 vsi->netdev = NULL;
9814         }
9815 err_netdev:
9816         i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9817 err_vsi:
9818         i40e_vsi_clear(vsi);
9819 err_alloc:
9820         return NULL;
9821 }
9822
9823 /**
9824  * i40e_veb_get_bw_info - Query VEB BW information
9825  * @veb: the veb to query
9826  *
9827  * Query the Tx scheduler BW configuration data for given VEB
9828  **/
9829 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9830 {
9831         struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9832         struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9833         struct i40e_pf *pf = veb->pf;
9834         struct i40e_hw *hw = &pf->hw;
9835         u32 tc_bw_max;
9836         int ret = 0;
9837         int i;
9838
9839         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9840                                                   &bw_data, NULL);
9841         if (ret) {
9842                 dev_info(&pf->pdev->dev,
9843                          "query veb bw config failed, err %s aq_err %s\n",
9844                          i40e_stat_str(&pf->hw, ret),
9845                          i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9846                 goto out;
9847         }
9848
9849         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9850                                                    &ets_data, NULL);
9851         if (ret) {
9852                 dev_info(&pf->pdev->dev,
9853                          "query veb bw ets config failed, err %s aq_err %s\n",
9854                          i40e_stat_str(&pf->hw, ret),
9855                          i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9856                 goto out;
9857         }
9858
9859         veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9860         veb->bw_max_quanta = ets_data.tc_bw_max;
9861         veb->is_abs_credits = bw_data.absolute_credits_enable;
9862         veb->enabled_tc = ets_data.tc_valid_bits;
9863         tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9864                     (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9865         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9866                 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9867                 veb->bw_tc_limit_credits[i] =
9868                                         le16_to_cpu(bw_data.tc_bw_limits[i]);
9869                 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9870         }
9871
9872 out:
9873         return ret;
9874 }
9875
9876 /**
9877  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9878  * @pf: board private structure
9879  *
9880  * On error: returns error code (negative)
9881  * On success: returns vsi index in PF (positive)
9882  **/
9883 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9884 {
9885         int ret = -ENOENT;
9886         struct i40e_veb *veb;
9887         int i;
9888
9889         /* Need to protect the allocation of switch elements at the PF level */
9890         mutex_lock(&pf->switch_mutex);
9891
9892         /* VEB list may be fragmented if VEB creation/destruction has
9893          * been happening.  We can afford to do a quick scan to look
9894          * for any free slots in the list.
9895          *
9896          * find next empty veb slot, looping back around if necessary
9897          */
9898         i = 0;
9899         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9900                 i++;
9901         if (i >= I40E_MAX_VEB) {
9902                 ret = -ENOMEM;
9903                 goto err_alloc_veb;  /* out of VEB slots! */
9904         }
9905
9906         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9907         if (!veb) {
9908                 ret = -ENOMEM;
9909                 goto err_alloc_veb;
9910         }
9911         veb->pf = pf;
9912         veb->idx = i;
9913         veb->enabled_tc = 1;
9914
9915         pf->veb[i] = veb;
9916         ret = i;
9917 err_alloc_veb:
9918         mutex_unlock(&pf->switch_mutex);
9919         return ret;
9920 }
9921
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	/* cache seid/idx: *branch may be freed by the VSI releases below */
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
9966
9967 /**
9968  * i40e_veb_clear - remove veb struct
9969  * @veb: the veb to remove
9970  **/
9971 static void i40e_veb_clear(struct i40e_veb *veb)
9972 {
9973         if (!veb)
9974                 return;
9975
9976         if (veb->pf) {
9977                 struct i40e_pf *pf = veb->pf;
9978
9979                 mutex_lock(&pf->switch_mutex);
9980                 if (pf->veb[veb->idx] == veb)
9981                         pf->veb[veb->idx] = NULL;
9982                 mutex_unlock(&pf->switch_mutex);
9983         }
9984
9985         kfree(veb);
9986 }
9987
9988 /**
9989  * i40e_veb_release - Delete a VEB and free its resources
9990  * @veb: the VEB being removed
9991  **/
9992 void i40e_veb_release(struct i40e_veb *veb)
9993 {
9994         struct i40e_vsi *vsi = NULL;
9995         struct i40e_pf *pf;
9996         int i, n = 0;
9997
9998         pf = veb->pf;
9999
10000         /* find the remaining VSI and check for extras */
10001         for (i = 0; i < pf->num_alloc_vsi; i++) {
10002                 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10003                         n++;
10004                         vsi = pf->vsi[i];
10005                 }
10006         }
10007         if (n != 1) {
10008                 dev_info(&pf->pdev->dev,
10009                          "can't remove VEB %d with %d VSIs left\n",
10010                          veb->seid, n);
10011                 return;
10012         }
10013
10014         /* move the remaining VSI to uplink veb */
10015         vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10016         if (veb->uplink_seid) {
10017                 vsi->uplink_seid = veb->uplink_seid;
10018                 if (veb->uplink_seid == pf->mac_seid)
10019                         vsi->veb_idx = I40E_NO_VEB;
10020                 else
10021                         vsi->veb_idx = veb->veb_idx;
10022         } else {
10023                 /* floating VEB */
10024                 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10025                 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10026         }
10027
10028         i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10029         i40e_veb_clear(veb);
10030 }
10031
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Asks the firmware to instantiate the VEB under @veb->uplink_seid,
 * fetches the statistics counter index and bandwidth configuration,
 * then links @vsi under the new VEB as the owner VSI.
 *
 * Returns 0 on success; -EPERM if the firmware rejects the add or the
 * statistics query, -ENOENT if the BW query fails (in which case the
 * just-created element is deleted again).
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* firmware could not instantiate the VEB */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* roll back: remove the element we just created */
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* hook the owner VSI under the new VEB */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
10082
10083 /**
10084  * i40e_veb_setup - Set up a VEB
10085  * @pf: board private structure
10086  * @flags: VEB setup flags
10087  * @uplink_seid: the switch element to link to
10088  * @vsi_seid: the initial VSI seid
10089  * @enabled_tc: Enabled TC bit-map
10090  *
10091  * This allocates the sw VEB structure and links it into the switch
10092  * It is possible and legal for this to be a duplicate of an already
10093  * existing VEB.  It is also possible for both uplink and vsi seids
10094  * to be zero, in order to create a floating VEB.
10095  *
10096  * Returns pointer to the successfully allocated VEB sw struct on
10097  * success, otherwise returns NULL on failure.
10098  **/
10099 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10100                                 u16 uplink_seid, u16 vsi_seid,
10101                                 u8 enabled_tc)
10102 {
10103         struct i40e_veb *veb, *uplink_veb = NULL;
10104         int vsi_idx, veb_idx;
10105         int ret;
10106
10107         /* if one seid is 0, the other must be 0 to create a floating relay */
10108         if ((uplink_seid == 0 || vsi_seid == 0) &&
10109             (uplink_seid + vsi_seid != 0)) {
10110                 dev_info(&pf->pdev->dev,
10111                          "one, not both seid's are 0: uplink=%d vsi=%d\n",
10112                          uplink_seid, vsi_seid);
10113                 return NULL;
10114         }
10115
10116         /* make sure there is such a vsi and uplink */
10117         for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10118                 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10119                         break;
10120         if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10121                 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10122                          vsi_seid);
10123                 return NULL;
10124         }
10125
10126         if (uplink_seid && uplink_seid != pf->mac_seid) {
10127                 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10128                         if (pf->veb[veb_idx] &&
10129                             pf->veb[veb_idx]->seid == uplink_seid) {
10130                                 uplink_veb = pf->veb[veb_idx];
10131                                 break;
10132                         }
10133                 }
10134                 if (!uplink_veb) {
10135                         dev_info(&pf->pdev->dev,
10136                                  "uplink seid %d not found\n", uplink_seid);
10137                         return NULL;
10138                 }
10139         }
10140
10141         /* get veb sw struct */
10142         veb_idx = i40e_veb_mem_alloc(pf);
10143         if (veb_idx < 0)
10144                 goto err_alloc;
10145         veb = pf->veb[veb_idx];
10146         veb->flags = flags;
10147         veb->uplink_seid = uplink_seid;
10148         veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10149         veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10150
10151         /* create the VEB in the switch */
10152         ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10153         if (ret)
10154                 goto err_veb;
10155         if (vsi_idx == pf->lan_vsi)
10156                 pf->lan_veb = veb->idx;
10157
10158         return veb;
10159
10160 err_veb:
10161         i40e_veb_clear(veb);
10162 err_alloc:
10163         return NULL;
10164 }
10165
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB?  Only a VEB hanging directly off the MAC is
		 * tracked as the LAN VEB; other VEBs are ignored here.
		 */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				/* no existing entry - allocate a fresh slot */
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		/* (re)attach the sw struct to the reported element */
		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
10248
10249 /**
10250  * i40e_fetch_switch_configuration - Get switch config from firmware
10251  * @pf: board private structure
10252  * @printconfig: should we print the contents
10253  *
10254  * Get the current switch configuration from the device and
10255  * extract a few useful SEID values.
10256  **/
10257 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10258 {
10259         struct i40e_aqc_get_switch_config_resp *sw_config;
10260         u16 next_seid = 0;
10261         int ret = 0;
10262         u8 *aq_buf;
10263         int i;
10264
10265         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10266         if (!aq_buf)
10267                 return -ENOMEM;
10268
10269         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10270         do {
10271                 u16 num_reported, num_total;
10272
10273                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10274                                                 I40E_AQ_LARGE_BUF,
10275                                                 &next_seid, NULL);
10276                 if (ret) {
10277                         dev_info(&pf->pdev->dev,
10278                                  "get switch config failed err %s aq_err %s\n",
10279                                  i40e_stat_str(&pf->hw, ret),
10280                                  i40e_aq_str(&pf->hw,
10281                                              pf->hw.aq.asq_last_status));
10282                         kfree(aq_buf);
10283                         return -ENOENT;
10284                 }
10285
10286                 num_reported = le16_to_cpu(sw_config->header.num_reported);
10287                 num_total = le16_to_cpu(sw_config->header.num_total);
10288
10289                 if (printconfig)
10290                         dev_info(&pf->pdev->dev,
10291                                  "header: %d reported %d total\n",
10292                                  num_reported, num_total);
10293
10294                 for (i = 0; i < num_reported; i++) {
10295                         struct i40e_aqc_switch_config_element_resp *ele =
10296                                 &sw_config->element[i];
10297
10298                         i40e_setup_pf_switch_element(pf, ele, num_reported,
10299                                                      printconfig);
10300                 }
10301         } while (next_seid != 0);
10302
10303         kfree(aq_buf);
10304         return ret;
10305 }
10306
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

	/* only PF0 may change the device-wide switch configuration */
	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
						NULL);
		/* ESRCH: older firmware without this command - tolerated */
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}
10415
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 *
 * Partitions the HW queue pairs among the PF LAN VSI, Flow Director,
 * VFs and VMDq, trimming features (DCB, FD, FCoE, ...) that there
 * are not enough queues for.  The leftover count is saved in
 * pf->queues_left.
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		/* note: DCB_CAPABLE survives here, unlike the branch above */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
					I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		/* LAN gets max(rss_size_max, online CPUs), capped by HW */
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	/* scale the requested VF count down to what fits */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	/* likewise for VMDq VSIs */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
#ifdef I40E_FCOE
	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
10537
10538 /**
10539  * i40e_setup_pf_filter_control - Setup PF static filter control
10540  * @pf: PF to be setup
10541  *
10542  * i40e_setup_pf_filter_control sets up a PF's initial filter control
10543  * settings. If PE/FCoE are enabled then it will also set the per PF
10544  * based filter sizes required for them. It also enables Flow director,
10545  * ethertype and macvlan type filter settings for the pf.
10546  *
10547  * Returns 0 on success, negative on failure
10548  **/
10549 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10550 {
10551         struct i40e_filter_control_settings *settings = &pf->filter_settings;
10552
10553         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10554
10555         /* Flow Director is enabled */
10556         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10557                 settings->enable_fdir = true;
10558
10559         /* Ethtype and MACVLAN filters enabled for PF */
10560         settings->enable_ethtype = true;
10561         settings->enable_macvlan = true;
10562
10563         if (i40e_set_filter_control(&pf->hw, settings))
10564                 return -ENOENT;
10565
10566         return 0;
10567 }
10568
10569 #define INFO_STRING_LEN 255
10570 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
10571 static void i40e_print_features(struct i40e_pf *pf)
10572 {
10573         struct i40e_hw *hw = &pf->hw;
10574         char *buf;
10575         int i;
10576
10577         buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10578         if (!buf)
10579                 return;
10580
10581         i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10582 #ifdef CONFIG_PCI_IOV
10583         i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10584 #endif
10585         i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
10586                       pf->hw.func_caps.num_vsis,
10587                       pf->vsi[pf->lan_vsi]->num_queue_pairs);
10588         if (pf->flags & I40E_FLAG_RSS_ENABLED)
10589                 i += snprintf(&buf[i], REMAIN(i), " RSS");
10590         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10591                 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10592         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10593                 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10594                 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10595         }
10596         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10597                 i += snprintf(&buf[i], REMAIN(i), " DCB");
10598         i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10599         i += snprintf(&buf[i], REMAIN(i), " Geneve");
10600         if (pf->flags & I40E_FLAG_PTP)
10601                 i += snprintf(&buf[i], REMAIN(i), " PTP");
10602 #ifdef I40E_FCOE
10603         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10604                 i += snprintf(&buf[i], REMAIN(i), " FCOE");
10605 #endif
10606         if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10607                 i += snprintf(&buf[i], REMAIN(i), " VEB");
10608         else
10609                 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10610
10611         dev_info(&pf->pdev->dev, "%s\n", buf);
10612         kfree(buf);
10613         WARN_ON(i > INFO_STRING_LEN);
10614 }
10615
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 *
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return, the
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	/* assume no platform-supplied MAC until the lookup succeeds */
	pf->flags &= ~I40E_FLAG_PF_MAC;
	if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		pf->flags |= I40E_FLAG_PF_MAC;
}
10633
10634 /**
10635  * i40e_probe - Device initialization routine
10636  * @pdev: PCI device information struct
10637  * @ent: entry in i40e_pci_tbl
10638  *
10639  * i40e_probe initializes a PF identified by a pci_dev structure.
10640  * The OS initialization, configuring of the PF private structure,
10641  * and a hardware reset occur.
10642  *
10643  * Returns 0 on success, negative on failure
10644  **/
10645 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10646 {
10647         struct i40e_aq_get_phy_abilities_resp abilities;
10648         struct i40e_pf *pf;
10649         struct i40e_hw *hw;
10650         static u16 pfs_found;
10651         u16 wol_nvm_bits;
10652         u16 link_status;
10653         int err;
10654         u32 val;
10655         u32 i;
10656         u8 set_fc_aq_fail;
10657
10658         err = pci_enable_device_mem(pdev);
10659         if (err)
10660                 return err;
10661
10662         /* set up for high or low dma */
10663         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10664         if (err) {
10665                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10666                 if (err) {
10667                         dev_err(&pdev->dev,
10668                                 "DMA configuration failed: 0x%x\n", err);
10669                         goto err_dma;
10670                 }
10671         }
10672
10673         /* set up pci connections */
10674         err = pci_request_mem_regions(pdev, i40e_driver_name);
10675         if (err) {
10676                 dev_info(&pdev->dev,
10677                          "pci_request_selected_regions failed %d\n", err);
10678                 goto err_pci_reg;
10679         }
10680
10681         pci_enable_pcie_error_reporting(pdev);
10682         pci_set_master(pdev);
10683
10684         /* Now that we have a PCI connection, we need to do the
10685          * low level device setup.  This is primarily setting up
10686          * the Admin Queue structures and then querying for the
10687          * device's current profile information.
10688          */
10689         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10690         if (!pf) {
10691                 err = -ENOMEM;
10692                 goto err_pf_alloc;
10693         }
10694         pf->next_vsi = 0;
10695         pf->pdev = pdev;
10696         set_bit(__I40E_DOWN, &pf->state);
10697
10698         hw = &pf->hw;
10699         hw->back = pf;
10700
10701         pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10702                                 I40E_MAX_CSR_SPACE);
10703
10704         hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10705         if (!hw->hw_addr) {
10706                 err = -EIO;
10707                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10708                          (unsigned int)pci_resource_start(pdev, 0),
10709                          pf->ioremap_len, err);
10710                 goto err_ioremap;
10711         }
10712         hw->vendor_id = pdev->vendor;
10713         hw->device_id = pdev->device;
10714         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10715         hw->subsystem_vendor_id = pdev->subsystem_vendor;
10716         hw->subsystem_device_id = pdev->subsystem_device;
10717         hw->bus.device = PCI_SLOT(pdev->devfn);
10718         hw->bus.func = PCI_FUNC(pdev->devfn);
10719         pf->instance = pfs_found;
10720
10721         /* set up the locks for the AQ, do this only once in probe
10722          * and destroy them only once in remove
10723          */
10724         mutex_init(&hw->aq.asq_mutex);
10725         mutex_init(&hw->aq.arq_mutex);
10726
10727         pf->msg_enable = netif_msg_init(debug,
10728                                         NETIF_MSG_DRV |
10729                                         NETIF_MSG_PROBE |
10730                                         NETIF_MSG_LINK);
10731         if (debug < -1)
10732                 pf->hw.debug_mask = debug;
10733
10734         /* do a special CORER for clearing PXE mode once at init */
10735         if (hw->revision_id == 0 &&
10736             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10737                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10738                 i40e_flush(hw);
10739                 msleep(200);
10740                 pf->corer_count++;
10741
10742                 i40e_clear_pxe_mode(hw);
10743         }
10744
10745         /* Reset here to make sure all is clean and to define PF 'n' */
10746         i40e_clear_hw(hw);
10747         err = i40e_pf_reset(hw);
10748         if (err) {
10749                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10750                 goto err_pf_reset;
10751         }
10752         pf->pfr_count++;
10753
10754         hw->aq.num_arq_entries = I40E_AQ_LEN;
10755         hw->aq.num_asq_entries = I40E_AQ_LEN;
10756         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10757         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10758         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10759
10760         snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10761                  "%s-%s:misc",
10762                  dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10763
10764         err = i40e_init_shared_code(hw);
10765         if (err) {
10766                 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10767                          err);
10768                 goto err_pf_reset;
10769         }
10770
10771         /* set up a default setting for link flow control */
10772         pf->hw.fc.requested_mode = I40E_FC_NONE;
10773
10774         err = i40e_init_adminq(hw);
10775         if (err) {
10776                 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10777                         dev_info(&pdev->dev,
10778                                  "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10779                 else
10780                         dev_info(&pdev->dev,
10781                                  "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10782
10783                 goto err_pf_reset;
10784         }
10785
10786         /* provide nvm, fw, api versions */
10787         dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10788                  hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10789                  hw->aq.api_maj_ver, hw->aq.api_min_ver,
10790                  i40e_nvm_version_str(hw));
10791
10792         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10793             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10794                 dev_info(&pdev->dev,
10795                          "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10796         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10797                  hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10798                 dev_info(&pdev->dev,
10799                          "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10800
10801         i40e_verify_eeprom(pf);
10802
10803         /* Rev 0 hardware was never productized */
10804         if (hw->revision_id < 1)
10805                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10806
10807         i40e_clear_pxe_mode(hw);
10808         err = i40e_get_capabilities(pf);
10809         if (err)
10810                 goto err_adminq_setup;
10811
10812         err = i40e_sw_init(pf);
10813         if (err) {
10814                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10815                 goto err_sw_init;
10816         }
10817
10818         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10819                                 hw->func_caps.num_rx_qp,
10820                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10821         if (err) {
10822                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10823                 goto err_init_lan_hmc;
10824         }
10825
10826         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10827         if (err) {
10828                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10829                 err = -ENOENT;
10830                 goto err_configure_lan_hmc;
10831         }
10832
10833         /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10834          * Ignore error return codes because if it was already disabled via
10835          * hardware settings this will fail
10836          */
10837         if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
10838                 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10839                 i40e_aq_stop_lldp(hw, true, NULL);
10840         }
10841
10842         i40e_get_mac_addr(hw, hw->mac.addr);
10843         /* allow a platform config to override the HW addr */
10844         i40e_get_platform_mac_addr(pdev, pf);
10845         if (!is_valid_ether_addr(hw->mac.addr)) {
10846                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10847                 err = -EIO;
10848                 goto err_mac_addr;
10849         }
10850         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10851         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10852         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10853         if (is_valid_ether_addr(hw->mac.port_addr))
10854                 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10855 #ifdef I40E_FCOE
10856         err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10857         if (err)
10858                 dev_info(&pdev->dev,
10859                          "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10860         if (!is_valid_ether_addr(hw->mac.san_addr)) {
10861                 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10862                          hw->mac.san_addr);
10863                 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10864         }
10865         dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10866 #endif /* I40E_FCOE */
10867
10868         pci_set_drvdata(pdev, pf);
10869         pci_save_state(pdev);
10870 #ifdef CONFIG_I40E_DCB
10871         err = i40e_init_pf_dcb(pf);
10872         if (err) {
10873                 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10874                 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
10875                 /* Continue without DCB enabled */
10876         }
10877 #endif /* CONFIG_I40E_DCB */
10878
10879         /* set up periodic task facility */
10880         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10881         pf->service_timer_period = HZ;
10882
10883         INIT_WORK(&pf->service_task, i40e_service_task);
10884         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10885         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10886
10887         /* NVM bit on means WoL disabled for the port */
10888         i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
10889         if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
10890                 pf->wol_en = false;
10891         else
10892                 pf->wol_en = true;
10893         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10894
10895         /* set up the main switch operations */
10896         i40e_determine_queue_usage(pf);
10897         err = i40e_init_interrupt_scheme(pf);
10898         if (err)
10899                 goto err_switch_setup;
10900
10901         /* The number of VSIs reported by the FW is the minimum guaranteed
10902          * to us; HW supports far more and we share the remaining pool with
10903          * the other PFs. We allocate space for more than the guarantee with
10904          * the understanding that we might not get them all later.
10905          */
10906         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10907                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10908         else
10909                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10910
10911         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10912         pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
10913                           GFP_KERNEL);
10914         if (!pf->vsi) {
10915                 err = -ENOMEM;
10916                 goto err_switch_setup;
10917         }
10918
10919 #ifdef CONFIG_PCI_IOV
10920         /* prep for VF support */
10921         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10922             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10923             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10924                 if (pci_num_vf(pdev))
10925                         pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10926         }
10927 #endif
10928         err = i40e_setup_pf_switch(pf, false);
10929         if (err) {
10930                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10931                 goto err_vsis;
10932         }
10933
10934         /* Make sure flow control is set according to current settings */
10935         err = i40e_set_fc(hw, &set_fc_aq_fail, true);
10936         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
10937                 dev_dbg(&pf->pdev->dev,
10938                         "Set fc with err %s aq_err %s on get_phy_cap\n",
10939                         i40e_stat_str(hw, err),
10940                         i40e_aq_str(hw, hw->aq.asq_last_status));
10941         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
10942                 dev_dbg(&pf->pdev->dev,
10943                         "Set fc with err %s aq_err %s on set_phy_config\n",
10944                         i40e_stat_str(hw, err),
10945                         i40e_aq_str(hw, hw->aq.asq_last_status));
10946         if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
10947                 dev_dbg(&pf->pdev->dev,
10948                         "Set fc with err %s aq_err %s on get_link_info\n",
10949                         i40e_stat_str(hw, err),
10950                         i40e_aq_str(hw, hw->aq.asq_last_status));
10951
10952         /* if FDIR VSI was set up, start it now */
10953         for (i = 0; i < pf->num_alloc_vsi; i++) {
10954                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10955                         i40e_vsi_open(pf->vsi[i]);
10956                         break;
10957                 }
10958         }
10959
10960         /* The driver only wants link up/down and module qualification
10961          * reports from firmware.  Note the negative logic.
10962          */
10963         err = i40e_aq_set_phy_int_mask(&pf->hw,
10964                                        ~(I40E_AQ_EVENT_LINK_UPDOWN |
10965                                          I40E_AQ_EVENT_MEDIA_NA |
10966                                          I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10967         if (err)
10968                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10969                          i40e_stat_str(&pf->hw, err),
10970                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10971
10972         /* Reconfigure hardware for allowing smaller MSS in the case
10973          * of TSO, so that we avoid the MDD being fired and causing
10974          * a reset in the case of small MSS+TSO.
10975          */
10976         val = rd32(hw, I40E_REG_MSS);
10977         if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10978                 val &= ~I40E_REG_MSS_MIN_MASK;
10979                 val |= I40E_64BYTE_MSS;
10980                 wr32(hw, I40E_REG_MSS, val);
10981         }
10982
10983         if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
10984                 msleep(75);
10985                 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10986                 if (err)
10987                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10988                                  i40e_stat_str(&pf->hw, err),
10989                                  i40e_aq_str(&pf->hw,
10990                                              pf->hw.aq.asq_last_status));
10991         }
10992         /* The main driver is (mostly) up and happy. We need to set this state
10993          * before setting up the misc vector or we get a race and the vector
10994          * ends up disabled forever.
10995          */
10996         clear_bit(__I40E_DOWN, &pf->state);
10997
10998         /* In case of MSIX we are going to setup the misc vector right here
10999          * to handle admin queue events etc. In case of legacy and MSI
11000          * the misc functionality and queue processing is combined in
11001          * the same vector and that gets setup at open.
11002          */
11003         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11004                 err = i40e_setup_misc_vector(pf);
11005                 if (err) {
11006                         dev_info(&pdev->dev,
11007                                  "setup of misc vector failed: %d\n", err);
11008                         goto err_vsis;
11009                 }
11010         }
11011
11012 #ifdef CONFIG_PCI_IOV
11013         /* prep for VF support */
11014         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11015             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11016             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11017                 /* disable link interrupts for VFs */
11018                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11019                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11020                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11021                 i40e_flush(hw);
11022
11023                 if (pci_num_vf(pdev)) {
11024                         dev_info(&pdev->dev,
11025                                  "Active VFs found, allocating resources.\n");
11026                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11027                         if (err)
11028                                 dev_info(&pdev->dev,
11029                                          "Error %d allocating resources for existing VFs\n",
11030                                          err);
11031                 }
11032         }
11033 #endif /* CONFIG_PCI_IOV */
11034
11035         if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11036                 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11037                                                       pf->num_iwarp_msix,
11038                                                       I40E_IWARP_IRQ_PILE_ID);
11039                 if (pf->iwarp_base_vector < 0) {
11040                         dev_info(&pdev->dev,
11041                                  "failed to get tracking for %d vectors for IWARP err=%d\n",
11042                                  pf->num_iwarp_msix, pf->iwarp_base_vector);
11043                         pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11044                 }
11045         }
11046
11047         i40e_dbg_pf_init(pf);
11048
11049         /* tell the firmware that we're starting */
11050         i40e_send_version(pf);
11051
11052         /* since everything's happy, start the service_task timer */
11053         mod_timer(&pf->service_timer,
11054                   round_jiffies(jiffies + pf->service_timer_period));
11055
11056         /* add this PF to client device list and launch a client service task */
11057         err = i40e_lan_add_device(pf);
11058         if (err)
11059                 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11060                          err);
11061
11062 #ifdef I40E_FCOE
11063         /* create FCoE interface */
11064         i40e_fcoe_vsi_setup(pf);
11065
11066 #endif
11067 #define PCI_SPEED_SIZE 8
11068 #define PCI_WIDTH_SIZE 8
11069         /* Devices on the IOSF bus do not have this information
11070          * and will report PCI Gen 1 x 1 by default so don't bother
11071          * checking them.
11072          */
11073         if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11074                 char speed[PCI_SPEED_SIZE] = "Unknown";
11075                 char width[PCI_WIDTH_SIZE] = "Unknown";
11076
11077                 /* Get the negotiated link width and speed from PCI config
11078                  * space
11079                  */
11080                 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11081                                           &link_status);
11082
11083                 i40e_set_pci_config_data(hw, link_status);
11084
11085                 switch (hw->bus.speed) {
11086                 case i40e_bus_speed_8000:
11087                         strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11088                 case i40e_bus_speed_5000:
11089                         strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11090                 case i40e_bus_speed_2500:
11091                         strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11092                 default:
11093                         break;
11094                 }
11095                 switch (hw->bus.width) {
11096                 case i40e_bus_width_pcie_x8:
11097                         strncpy(width, "8", PCI_WIDTH_SIZE); break;
11098                 case i40e_bus_width_pcie_x4:
11099                         strncpy(width, "4", PCI_WIDTH_SIZE); break;
11100                 case i40e_bus_width_pcie_x2:
11101                         strncpy(width, "2", PCI_WIDTH_SIZE); break;
11102                 case i40e_bus_width_pcie_x1:
11103                         strncpy(width, "1", PCI_WIDTH_SIZE); break;
11104                 default:
11105                         break;
11106                 }
11107
11108                 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11109                          speed, width);
11110
11111                 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11112                     hw->bus.speed < i40e_bus_speed_8000) {
11113                         dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11114                         dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11115                 }
11116         }
11117
11118         /* get the requested speeds from the fw */
11119         err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11120         if (err)
11121                 dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
11122                         i40e_stat_str(&pf->hw, err),
11123                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11124         pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11125
11126         /* get the supported phy types from the fw */
11127         err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11128         if (err)
11129                 dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
11130                         i40e_stat_str(&pf->hw, err),
11131                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11132         pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11133
11134         /* Add a filter to drop all Flow control frames from any VSI from being
11135          * transmitted. By doing so we stop a malicious VF from sending out
11136          * PAUSE or PFC frames and potentially controlling traffic for other
11137          * PF/VF VSIs.
11138          * The FW can still send Flow control frames if enabled.
11139          */
11140         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11141                                                        pf->main_vsi_seid);
11142
11143         if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11144             (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11145                 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11146
11147         /* print a string summarizing features */
11148         i40e_print_features(pf);
11149
11150         return 0;
11151
11152         /* Unwind what we've done if something failed in the setup */
11153 err_vsis:
11154         set_bit(__I40E_DOWN, &pf->state);
11155         i40e_clear_interrupt_scheme(pf);
11156         kfree(pf->vsi);
11157 err_switch_setup:
11158         i40e_reset_interrupt_capability(pf);
11159         del_timer_sync(&pf->service_timer);
11160 err_mac_addr:
11161 err_configure_lan_hmc:
11162         (void)i40e_shutdown_lan_hmc(hw);
11163 err_init_lan_hmc:
11164         kfree(pf->qp_pile);
11165 err_sw_init:
11166 err_adminq_setup:
11167 err_pf_reset:
11168         iounmap(hw->hw_addr);
11169 err_ioremap:
11170         kfree(pf);
11171 err_pf_alloc:
11172         pci_disable_pcie_error_reporting(pdev);
11173         pci_release_mem_regions(pdev);
11174 err_pci_reg:
11175 err_dma:
11176         pci_disable_device(pdev);
11177         return err;
11178 }
11179
11180 /**
11181  * i40e_remove - Device removal routine
11182  * @pdev: PCI device information struct
11183  *
11184  * i40e_remove is called by the PCI subsystem to alert the driver
11185  * that is should release a PCI device.  This could be caused by a
11186  * Hot-Plug event, or because the driver is going to be removed from
11187  * memory.
11188  **/
11189 static void i40e_remove(struct pci_dev *pdev)
11190 {
11191         struct i40e_pf *pf = pci_get_drvdata(pdev);
11192         struct i40e_hw *hw = &pf->hw;
11193         i40e_status ret_code;
11194         int i;
11195
11196         i40e_dbg_pf_exit(pf);
11197
11198         i40e_ptp_stop(pf);
11199
11200         /* Disable RSS in hw */
11201         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11202         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11203
11204         /* no more scheduling of any task */
11205         set_bit(__I40E_SUSPENDED, &pf->state);
11206         set_bit(__I40E_DOWN, &pf->state);
11207         if (pf->service_timer.data)
11208                 del_timer_sync(&pf->service_timer);
11209         if (pf->service_task.func)
11210                 cancel_work_sync(&pf->service_task);
11211
11212         if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11213                 i40e_free_vfs(pf);
11214                 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11215         }
11216
11217         i40e_fdir_teardown(pf);
11218
11219         /* If there is a switch structure or any orphans, remove them.
11220          * This will leave only the PF's VSI remaining.
11221          */
11222         for (i = 0; i < I40E_MAX_VEB; i++) {
11223                 if (!pf->veb[i])
11224                         continue;
11225
11226                 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11227                     pf->veb[i]->uplink_seid == 0)
11228                         i40e_switch_branch_release(pf->veb[i]);
11229         }
11230
11231         /* Now we can shutdown the PF's VSI, just before we kill
11232          * adminq and hmc.
11233          */
11234         if (pf->vsi[pf->lan_vsi])
11235                 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11236
11237         /* remove attached clients */
11238         ret_code = i40e_lan_del_device(pf);
11239         if (ret_code) {
11240                 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11241                          ret_code);
11242         }
11243
11244         /* shutdown and destroy the HMC */
11245         if (hw->hmc.hmc_obj) {
11246                 ret_code = i40e_shutdown_lan_hmc(hw);
11247                 if (ret_code)
11248                         dev_warn(&pdev->dev,
11249                                  "Failed to destroy the HMC resources: %d\n",
11250                                  ret_code);
11251         }
11252
11253         /* shutdown the adminq */
11254         i40e_shutdown_adminq(hw);
11255
11256         /* destroy the locks only once, here */
11257         mutex_destroy(&hw->aq.arq_mutex);
11258         mutex_destroy(&hw->aq.asq_mutex);
11259
11260         /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11261         i40e_clear_interrupt_scheme(pf);
11262         for (i = 0; i < pf->num_alloc_vsi; i++) {
11263                 if (pf->vsi[i]) {
11264                         i40e_vsi_clear_rings(pf->vsi[i]);
11265                         i40e_vsi_clear(pf->vsi[i]);
11266                         pf->vsi[i] = NULL;
11267                 }
11268         }
11269
11270         for (i = 0; i < I40E_MAX_VEB; i++) {
11271                 kfree(pf->veb[i]);
11272                 pf->veb[i] = NULL;
11273         }
11274
11275         kfree(pf->qp_pile);
11276         kfree(pf->vsi);
11277
11278         iounmap(hw->hw_addr);
11279         kfree(pf);
11280         pci_release_mem_regions(pdev);
11281
11282         pci_disable_pcie_error_reporting(pdev);
11283         pci_disable_device(pdev);
11284 }
11285
11286 /**
11287  * i40e_pci_error_detected - warning that something funky happened in PCI land
11288  * @pdev: PCI device information struct
11289  *
11290  * Called to warn that something happened and the error handling steps
11291  * are in progress.  Allows the driver to quiesce things, be ready for
11292  * remediation.
11293  **/
11294 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11295                                                 enum pci_channel_state error)
11296 {
11297         struct i40e_pf *pf = pci_get_drvdata(pdev);
11298
11299         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11300
11301         if (!pf) {
11302                 dev_info(&pdev->dev,
11303                          "Cannot recover - error happened during device probe\n");
11304                 return PCI_ERS_RESULT_DISCONNECT;
11305         }
11306
11307         /* shutdown all operations */
11308         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11309                 rtnl_lock();
11310                 i40e_prep_for_reset(pf);
11311                 rtnl_unlock();
11312         }
11313
11314         /* Request a slot reset */
11315         return PCI_ERS_RESULT_NEED_RESET;
11316 }
11317
11318 /**
11319  * i40e_pci_error_slot_reset - a PCI slot reset just happened
11320  * @pdev: PCI device information struct
11321  *
11322  * Called to find if the driver can work with the device now that
11323  * the pci slot has been reset.  If a basic connection seems good
11324  * (registers are readable and have sane content) then return a
11325  * happy little PCI_ERS_RESULT_xxx.
11326  **/
11327 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11328 {
11329         struct i40e_pf *pf = pci_get_drvdata(pdev);
11330         pci_ers_result_t result;
11331         int err;
11332         u32 reg;
11333
11334         dev_dbg(&pdev->dev, "%s\n", __func__);
11335         if (pci_enable_device_mem(pdev)) {
11336                 dev_info(&pdev->dev,
11337                          "Cannot re-enable PCI device after reset.\n");
11338                 result = PCI_ERS_RESULT_DISCONNECT;
11339         } else {
11340                 pci_set_master(pdev);
11341                 pci_restore_state(pdev);
11342                 pci_save_state(pdev);
11343                 pci_wake_from_d3(pdev, false);
11344
11345                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11346                 if (reg == 0)
11347                         result = PCI_ERS_RESULT_RECOVERED;
11348                 else
11349                         result = PCI_ERS_RESULT_DISCONNECT;
11350         }
11351
11352         err = pci_cleanup_aer_uncorrect_error_status(pdev);
11353         if (err) {
11354                 dev_info(&pdev->dev,
11355                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11356                          err);
11357                 /* non-fatal, continue */
11358         }
11359
11360         return result;
11361 }
11362
11363 /**
11364  * i40e_pci_error_resume - restart operations after PCI error recovery
11365  * @pdev: PCI device information struct
11366  *
11367  * Called to allow the driver to bring things back up after PCI error
11368  * and/or reset recovery has finished.
11369  **/
11370 static void i40e_pci_error_resume(struct pci_dev *pdev)
11371 {
11372         struct i40e_pf *pf = pci_get_drvdata(pdev);
11373
11374         dev_dbg(&pdev->dev, "%s\n", __func__);
11375         if (test_bit(__I40E_SUSPENDED, &pf->state))
11376                 return;
11377
11378         rtnl_lock();
11379         i40e_handle_reset_warning(pf);
11380         rtnl_unlock();
11381 }
11382
11383 /**
11384  * i40e_shutdown - PCI callback for shutting down
11385  * @pdev: PCI device information struct
11386  **/
11387 static void i40e_shutdown(struct pci_dev *pdev)
11388 {
11389         struct i40e_pf *pf = pci_get_drvdata(pdev);
11390         struct i40e_hw *hw = &pf->hw;
11391
11392         set_bit(__I40E_SUSPENDED, &pf->state);
11393         set_bit(__I40E_DOWN, &pf->state);
11394         rtnl_lock();
11395         i40e_prep_for_reset(pf);
11396         rtnl_unlock();
11397
11398         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11399         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11400
11401         del_timer_sync(&pf->service_timer);
11402         cancel_work_sync(&pf->service_task);
11403         i40e_fdir_teardown(pf);
11404
11405         rtnl_lock();
11406         i40e_prep_for_reset(pf);
11407         rtnl_unlock();
11408
11409         wr32(hw, I40E_PFPM_APM,
11410              (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11411         wr32(hw, I40E_PFPM_WUFC,
11412              (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11413
11414         i40e_clear_interrupt_scheme(pf);
11415
11416         if (system_state == SYSTEM_POWER_OFF) {
11417                 pci_wake_from_d3(pdev, pf->wol_en);
11418                 pci_set_power_state(pdev, PCI_D3hot);
11419         }
11420 }
11421
11422 #ifdef CONFIG_PM
11423 /**
11424  * i40e_suspend - PCI callback for moving to D3
11425  * @pdev: PCI device information struct
11426  **/
11427 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11428 {
11429         struct i40e_pf *pf = pci_get_drvdata(pdev);
11430         struct i40e_hw *hw = &pf->hw;
11431         int retval = 0;
11432
11433         set_bit(__I40E_SUSPENDED, &pf->state);
11434         set_bit(__I40E_DOWN, &pf->state);
11435
11436         rtnl_lock();
11437         i40e_prep_for_reset(pf);
11438         rtnl_unlock();
11439
11440         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11441         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11442
11443         i40e_stop_misc_vector(pf);
11444
11445         retval = pci_save_state(pdev);
11446         if (retval)
11447                 return retval;
11448
11449         pci_wake_from_d3(pdev, pf->wol_en);
11450         pci_set_power_state(pdev, PCI_D3hot);
11451
11452         return retval;
11453 }
11454
11455 /**
11456  * i40e_resume - PCI callback for waking up from D3
11457  * @pdev: PCI device information struct
11458  **/
11459 static int i40e_resume(struct pci_dev *pdev)
11460 {
11461         struct i40e_pf *pf = pci_get_drvdata(pdev);
11462         u32 err;
11463
11464         pci_set_power_state(pdev, PCI_D0);
11465         pci_restore_state(pdev);
11466         /* pci_restore_state() clears dev->state_saves, so
11467          * call pci_save_state() again to restore it.
11468          */
11469         pci_save_state(pdev);
11470
11471         err = pci_enable_device_mem(pdev);
11472         if (err) {
11473                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11474                 return err;
11475         }
11476         pci_set_master(pdev);
11477
11478         /* no wakeup events while running */
11479         pci_wake_from_d3(pdev, false);
11480
11481         /* handling the reset will rebuild the device state */
11482         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11483                 clear_bit(__I40E_DOWN, &pf->state);
11484                 rtnl_lock();
11485                 i40e_reset_and_rebuild(pf, false);
11486                 rtnl_unlock();
11487         }
11488
11489         return 0;
11490 }
11491
11492 #endif
/* PCIe AER (Advanced Error Reporting) hooks: detect an error, reset the
 * slot, then resume normal operation once recovery completes.
 */
static const struct pci_error_handlers i40e_err_handler = {
        .error_detected = i40e_pci_error_detected,
        .slot_reset = i40e_pci_error_slot_reset,
        .resume = i40e_pci_error_resume,
};
11498
/* PCI driver entry points; suspend/resume use the legacy PM callbacks
 * and are only wired up when power management is configured in.
 */
static struct pci_driver i40e_driver = {
        .name     = i40e_driver_name,
        .id_table = i40e_pci_tbl,
        .probe    = i40e_probe,
        .remove   = i40e_remove,
#ifdef CONFIG_PM
        .suspend  = i40e_suspend,
        .resume   = i40e_resume,
#endif
        .shutdown = i40e_shutdown,
        .err_handler = &i40e_err_handler,
        .sriov_configure = i40e_pci_sriov_configure,
};
11512
11513 /**
11514  * i40e_init_module - Driver registration routine
11515  *
11516  * i40e_init_module is the first routine called when the driver is
11517  * loaded. All it does is register with the PCI subsystem.
11518  **/
11519 static int __init i40e_init_module(void)
11520 {
11521         pr_info("%s: %s - version %s\n", i40e_driver_name,
11522                 i40e_driver_string, i40e_driver_version_str);
11523         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11524
11525         /* we will see if single thread per module is enough for now,
11526          * it can't be any worse than using the system workqueue which
11527          * was already single threaded
11528          */
11529         i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11530                                   i40e_driver_name);
11531         if (!i40e_wq) {
11532                 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11533                 return -ENOMEM;
11534         }
11535
11536         i40e_dbg_init();
11537         return pci_register_driver(&i40e_driver);
11538 }
11539 module_init(i40e_init_module);
11540
11541 /**
11542  * i40e_exit_module - Driver exit cleanup routine
11543  *
11544  * i40e_exit_module is called just before the driver is removed
11545  * from memory.
11546  **/
11547 static void __exit i40e_exit_module(void)
11548 {
11549         pci_unregister_driver(&i40e_driver);
11550         destroy_workqueue(i40e_wq);
11551         i40e_dbg_exit();
11552 }
11553 module_exit(i40e_exit_module);