/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
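/* The IOMMU ring size is much larger: when an IOMMU is in use, tearing down
 * and re-creating DMA mappings is expensive, so it is worth keeping many more
 * pages mapped and ready for reuse.  Without an IOMMU, two batches' worth of
 * buffers is enough to keep the refill path supplied.
 */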
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}
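/* The RX hash lives in the packet prefix written by the NIC, at
 * efx->rx_packet_hash_offset.  On architectures without efficient unaligned
 * access the #else branch above assembles the little-endian 32-bit value one
 * byte at a time instead of doing a potentially unaligned 32-bit load.
 */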
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
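/* Each page used for order-0 RX buffers is therefore laid out as below, where
 * the step is efx->rx_page_buf_step and includes the NET_IP_ALIGN padding:
 *
 *   | struct efx_rx_page_state | pad | buffer 0 | pad | buffer 1 | ...
 *
 * With a standard 1500-byte MTU and 4KiB pages this normally works out at
 * two buffers per page.
 */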
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
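/* Note that a page coming back from the recycle ring is still DMA-mapped:
 * the mapping set up when the page was first allocated is kept for as long
 * as the page stays in the ring, which is what makes reuse cheaper than a
 * fresh alloc_pages()/dma_map_page() pair.
 */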
/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + NET_IP_ALIGN;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
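/* efx_init_rx_buffers() allocates with GFP_ATOMIC because it normally runs
 * from efx_fast_push_rx_descriptors() in NAPI context; on failure the caller
 * falls back to efx_schedule_slow_fill() rather than blocking.
 */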
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}
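/* efx_rx_slow_fill() is the rx_queue->slow_fill timer callback, armed by
 * efx_schedule_slow_fill() when an atomic refill fails.  It does not refill
 * directly; generating a fill event makes the refill happen later from NAPI
 * context, where efx_fast_push_rx_descriptors() can be retried safely.
 */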
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(efx, eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
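/* efx_rx_packet() is the first half of receive processing: it validates the
 * completion, syncs the DMA mappings and records the packet on the channel,
 * but never touches the payload.  __efx_rx_packet() runs later, either when
 * the next completion arrives or when the channel is flushed at the end of
 * the NAPI poll, so the prefetch issued above has time to pull the headers
 * into cache.
 */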
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}
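/* Worked example of the limit fields above, assuming rxq_entries = 512:
 *   max_fill    = 512 - EFX_RXD_HEAD_ROOM
 *   max_trigger = max_fill - rx_pages_per_batch * rx_bufs_per_page
 * and with the module parameter rx_refill_threshold set to e.g. 80 the
 * fast-fill trigger becomes min(max_fill * 80 / 100, max_trigger).
 */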
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(struct vlan_hdr));
		if (((const struct vlan_hdr *)(skb->data + nhoff))->
		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;

		/* This is IP over 802.1q VLAN.  We can't filter on the
		 * IP 5-tuple and the vlan together, so just strip the
		 * vlan header and filter on the IP part.
		 */
		nhoff += sizeof(struct vlan_hdr);
	} else if (skb->protocol != htons(ETH_P_IP)) {
		return -EPROTONOSUPPORT;
	}

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}
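/* The value returned from efx_filter_rfs() is the inserted filter's index.
 * efx->rps_flow_id[] records, for that index, the flow ID the RFS core gave
 * us, so that __efx_filter_rfs_expire() below can report the right flow when
 * it asks the NIC-type code whether each candidate filter may be expired.
 */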
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}