/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);
	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}
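	/*
	 * Worked example with hypothetical values: if vmdq->mask were
	 * IXGBE_82599_VMDQ_4Q_MASK (0x7C), then __ALIGN_MASK(1, ~vmdq->mask)
	 * evaluates to 4 (queues per pool), and the __ALIGN_MASK() call in
	 * the loop above rounds reg_idx up to the next multiple of 4, i.e.
	 * to the first queue of the next pool once this pool's TCs are
	 * exhausted.
	 */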
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}
#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;
	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
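/*
 * Example of the mapping above (hypothetical 8-TC case on 82599): tc = 4
 * gives *tx = (4 + 2) << 4 = 96 and *rx = 4 << 4 = 64, i.e. the first
 * register index of TC4's Tx and Rx queue blocks in the tables.
 */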
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;
	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;
	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;

#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}
#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;

#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}
#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
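/*
 * These masks bound the RSS queue index within a pool: expressions such as
 * (reg_idx & rss->mask) in ixgbe_cache_ring_sriov() extract the queue
 * offset inside the current pool before comparing it against rss->indices.
 */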
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);
	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}
#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
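	/*
	 * E.g., hypothetically in the 16-pool/8-TC mode (vmdq_m =
	 * IXGBE_82599_VMDQ_8Q_MASK), __ALIGN_MASK(1, ~vmdq_m) is 8, so with
	 * vmdq_i = 4 pools in use, fcoe_i = (128 / 8) - 4 = 12 pool slots
	 * remain for FCoE.
	 */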
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
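	/*
	 * netdev_set_tc_queue(dev, tc, count, offset) below records that
	 * traffic class "tc" owns "count" queues starting at "offset";
	 * here each TC maps to exactly one queue whose offset equals its
	 * TC index.
	 */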
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);
	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}
	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}
#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
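/*
 * Worked example for ixgbe_set_dcb_queues() (hypothetical values): with
 * tcs = 4 and an RSS limit of 16 on an 82599, rss_i = 16, TC i owns queues
 * [16 * i, 16 * i + 15], and num_tx_queues = num_rx_queues = 64.
 */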
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}
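	/*
	 * E.g., hypothetically requesting vmdq_i = 40 pools selects the
	 * 64-pool mode: each pool gets 2 queues and rss_i is capped at 2;
	 * vmdq_i = 20 without extra forwarding pools selects the 32-pool
	 * mode with up to 4 queues per pool.
	 */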
#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;
	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}
#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}
/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts.  Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;
	/* We start by asking for one vector per queue pair */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;
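	/*
	 * E.g., hypothetically with 16 Rx and 16 Tx queues on an 8-CPU
	 * system: vectors = min(16, 8) + NON_Q_VECTORS, further capped by
	 * the device's max_msix_vectors before being requested from the
	 * PCI core below.
	 */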
	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}
	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}
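/*
 * Note: rings are pushed onto the container head-first, so iterating with
 * ixgbe_for_each_ring() visits them in the reverse of insertion order.
 */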
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}
	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;
#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* initialize busy poll */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);

#endif
	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;
	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}
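	/*
	 * An itr_setting of 1 requests the driver's adaptive default; the
	 * IXGBE_12K_ITR/IXGBE_20K_ITR constants are named for roughly 12K
	 * and 20K interrupts per second.  Any other non-zero value is taken
	 * as a user-supplied fixed ITR.
	 */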
	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}
	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
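/*
 * Ring interleaving example (hypothetical): with v_count = 4 q_vectors,
 * vector 1 owns Tx/Rx rings 1, 5, 9, ... because each loop above advances
 * txr_idx/rxr_idx by v_count per ring assigned.
 */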
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}
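	/*
	 * Distribution example (hypothetical): 6 Rx + 6 Tx queues over 4
	 * q_vectors gives rqpv/tqpv of 2, 2, 1, 1, since DIV_ROUND_UP()
	 * rebalances the remainder on each iteration.
	 */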
	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capabilities.
	 */
1109 if (netdev_get_num_tc(adapter->netdev) > 1) {
1110 e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1111 netdev_reset_tc(adapter->netdev);
1113 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1114 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1116 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1117 adapter->temp_dcb_cfg.pfc_mode_enable = false;
1118 adapter->dcb_cfg.pfc_mode_enable = false;
1121 adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1122 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;
	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
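/*
 * Usage sketch (hypothetical values): an offload path such as TSO builds
 * the descriptor fields first, then writes them out in one call, e.g.:
 *
 *	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
 *			  type_tucmd | IXGBE_ADVTXD_TUCMD_L4T_TCP,
 *			  mss_l4len_idx);
 *
 * The helper stamps the descriptor at next_to_use and advances the index,
 * wrapping to 0 at tx_ring->count.
 */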