/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

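/* Return the virtual address of an RX buffer's data, derived from its
 * page address and offset within the page.
 */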
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

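/* Read the little-endian 32-bit RX hash that precedes the Ethernet header
 * at @eh, using an unaligned load only where that is known to be safe.
 */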
static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

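/* Return the buffer following @rx_buf, wrapping around at the end of the
 * RX descriptor ring.
 */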
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

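/* Sync the first @len bytes of a receive buffer for the CPU so that the
 * driver can safely read the DMA'd packet data.
 */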
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

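/* Work out how receive buffers are laid out within each (compound) page:
 * the aligned step between buffers, how many buffers fit in a page after
 * the struct efx_rx_page_state header, the truesize accounted to each
 * buffer, and how many pages a refill batch of EFX_RX_PREFERRED_BATCH
 * buffers requires.
 *
 * Illustrative example only (the step depends on the MTU and
 * EFX_RX_BUF_ALIGNMENT): with 4KiB pages, order-0 buffers and a step of
 * roughly 1.8KiB, two buffers share each page, so a batch of 8 buffers
 * needs DIV_ROUND_UP(8, 2) = 4 pages.
 */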
void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct page *page;
        struct efx_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:           Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + NET_IP_ALIGN;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

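/* Release the page reference held by a buffer and clear its page pointer. */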
static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                put_page(rx_buf->page);
                rx_buf->page = NULL;
        }
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

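/* Tear down a buffer that is still owned by the driver: drop the page
 * reference held for the buffer and, for the final buffer in a page,
 * unmap the page and drop the mapping's reference as well.
 */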
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffer(rx_buf);
        }
        rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
                                 struct efx_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

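/* Discard a received packet: give its pages to the recycle ring where
 * possible, then drop the buffer references for all of its fragments.
 */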
static void efx_discard_rx_packet(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        do {
                efx_free_rx_buffer(rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:           RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

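/* Timer callback for the slow-fill path, scheduled when an atomic refill
 * fails: post a fill event so that NAPI runs and refills the queue.
 */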
void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

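/* Check the completed length of a single-buffer packet.  Over-length
 * packets are flagged for discard; if the event overran the buffer
 * itself, an RX recovery reset may be scheduled.
 */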
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
        memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

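/* Handle a received packet.  First half: validate the completion, sync the
 * DMA mappings, recycle the pages, and queue the packet for
 * __efx_rx_packet(), which touches the payload.
 */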
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1)
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->type->rx_buffer_hash_size;
        rx_buf->len -= efx->type->rx_buffer_hash_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
                }
                rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}

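/* Deliver a received packet as an skb: copy up to EFX_SKB_HEADERS bytes of
 * headers into a new skb, attach any remaining data as page fragments, and
 * pass it to the channel's receive_skb handler or the network stack.
 */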
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

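/* Allocate the software descriptor ring for an RX queue and the
 * corresponding hardware resources.
 */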
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

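/* Allocate the page recycle ring.  A larger ring is used when an IOMMU is
 * present, since DMA-mapping fresh pages is more expensive in that case.
 */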
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                     struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}

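/* Initialise an RX queue for use: reset the ring pointers and statistics,
 * set up the page recycle ring, and derive the fill limits from the
 * rx_refill_threshold module parameter.
 */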
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;

        /* Set up RX descriptor ring */
        rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* A flush failure might have left rx_queue->enabled */
        rx_queue->enabled = false;

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");