/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
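/* For example, with a 1024-entry ring the netif queue is restarted once
 * fewer than 512 descriptors are in use. */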

/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

        if (!tx_queue)
                return;

        spin_lock_bh(&channel->tx_stop_lock);
        netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

        atomic_inc(&channel->tx_stop_count);
        netif_tx_stop_queue(
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES));

        spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

        if (!tx_queue)
                return;

        local_bh_disable();
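        /* atomic_dec_and_lock() takes the lock only when the count
         * actually reaches zero */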
        if (atomic_dec_and_lock(&channel->tx_stop_count,
                                &channel->tx_stop_lock)) {
                netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
                netif_tx_wake_queue(
                        netdev_get_tx_queue(efx->net_dev,
                                            tx_queue->queue / EFX_TXQ_TYPES));
                spin_unlock(&channel->tx_stop_lock);
        }
        local_bh_enable();
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
                if (buffer->unmap_single)
                        pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
                                       PCI_DMA_TODEVICE);
                buffer->unmap_len = 0;
                buffer->unmap_single = false;
        }

        if (buffer->skb) {
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *      The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
        union {
                struct efx_tso_header *next;
                size_t unmap_len;
        };
        dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
                          struct efx_tx_buffer *buffer)
{
        if (buffer->tsoh) {
                if (likely(!buffer->tsoh->unmap_len)) {
                        buffer->tsoh->next = tx_queue->tso_headers_free;
                        tx_queue->tso_headers_free = buffer->tsoh;
                } else {
                        efx_tsoh_heap_free(tx_queue, buffer->tsoh);
                }
                buffer->tsoh = NULL;
        }
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
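        /* (~dma_addr & 0xfff) + 1 is the number of bytes up to the
         * next 4K boundary */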
        unsigned len = (~dma_addr & 0xfff) + 1;

        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct pci_dev *pci_dev = efx->pci_dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        struct page *page;
        int page_offset;
        unsigned int len, unmap_len = 0, fill_level, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        bool unmap_single;
        int q_space, i = 0;
        netdev_tx_t rc = NETDEV_TX_OK;

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);

        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }

        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
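        /* -1 as there is no way to represent all descriptors used */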
        q_space = efx->txq_entries - 1 - fill_level;

        /* Map for DMA.  Use pci_map_single rather than pci_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        unmap_single = true;
        dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
                        goto pci_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor */
                unmap_len = len;
                unmap_addr = dma_addr;

                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        if (unlikely(q_space-- <= 0)) {
                                /* It might be that completions have
                                 * happened since the xmit path last
                                 * checked.  Update the xmit path's
                                 * copy of read_count.
                                 */
                                ++tx_queue->stopped;
                                /* This memory barrier protects the
                                 * change of stopped from the access
                                 * of read_count. */
                                smp_mb();
                                tx_queue->old_read_count =
                                        ACCESS_ONCE(tx_queue->read_count);
                                fill_level = (tx_queue->insert_count
                                              - tx_queue->old_read_count);
                                q_space = efx->txq_entries - 1 - fill_level;
                                if (unlikely(q_space-- <= 0))
                                        goto stop;
                                smp_mb();
                                --tx_queue->stopped;
                        }

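                        /* insert_count is free-running; masking with
                         * ptr_mask yields the ring index */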
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
                        efx_tsoh_free(tx_queue, buffer);
                        EFX_BUG_ON_PARANOID(buffer->tsoh);
                        EFX_BUG_ON_PARANOID(buffer->skb);
                        EFX_BUG_ON_PARANOID(buffer->len);
                        EFX_BUG_ON_PARANOID(!buffer->continuation);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);

                /* Transfer ownership of the unmapping to the final buffer */
                buffer->unmap_single = unmap_single;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;

                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = fragment->size;
                page = fragment->page;
                page_offset = fragment->page_offset;
                i++;
                /* Map for DMA */
                unmap_single = false;
                dma_addr = pci_map_page(pci_dev, page, page_offset, len,
                                        PCI_DMA_TODEVICE);
        }

        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
        buffer->continuation = false;

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        return NETDEV_TX_OK;

 pci_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "TX queue %d could not map skb with %d bytes %d "
                  "fragments for DMA\n", tx_queue->queue, skb->len,
                  skb_shinfo(skb)->nr_frags + 1);

        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);
        goto unwind;

 stop:
        rc = NETDEV_TX_BUSY;

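        /* Stop the queue if it wasn't stopped before. */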
        if (tx_queue->stopped == 1)
                efx_stop_queue(tx_queue->channel);

 unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer);
                buffer->len = 0;
        }

        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (unmap_single)
                        pci_unmap_single(pci_dev, unmap_addr, unmap_len,
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pci_dev, unmap_addr, unmap_len,
                                       PCI_DMA_TODEVICE);
        }

        return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
                if (unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer);
                buffer->continuation = true;
                buffer->len = 0;

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;

        if (unlikely(efx->port_inhibited))
                return NETDEV_TX_BUSY;

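        /* Each core queue has two hardware queues: one with checksum
         * offload and one without; pick the right one for this skb */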
        tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
                                    skb->ip_summed == CHECKSUM_PARTIAL ?
                                    EFX_TXQ_TYPE_OFFLOAD : 0);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct netdev_queue *queue;

        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index);

        /* See if we need to restart the netif queue.  This barrier
         * separates the update of read_count from the test of
         * stopped. */
        smp_mb();
        if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
                fill_level = tx_queue->insert_count - tx_queue->read_count;
                if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
                        EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

                        /* Do this under netif_tx_lock(), to avoid racing
                         * with efx_xmit(). */
                        queue = netdev_get_tx_queue(
                                efx->net_dev,
                                tx_queue->queue / EFX_TXQ_TYPES);
                        __netif_tx_lock(queue, smp_processor_id());
                        if (tx_queue->stopped) {
                                tx_queue->stopped = 0;
                                efx_wake_queue(tx_queue->channel);
                        }
                        __netif_tx_unlock(queue);
                }
        }
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int i, rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
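        /* entries is a power of two, so (entries - 1) can serve as a
         * mask to turn the free-running counters into ring indices */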
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
        for (i = 0; i <= tx_queue->ptr_mask; ++i)
                tx_queue->buffer[i].continuation = true;

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail;

        return 0;

 fail:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        BUG_ON(tx_queue->stopped);

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer);
                buffer->continuation = true;
                buffer->len = 0;

                ++tx_queue->read_count;
        }
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);

        efx_release_tx_buffers(tx_queue);

        /* Free up TSO header cache */
        efx_fini_tso(tx_queue);

        /* Release queue's stop on port, if any */
        if (tx_queue->stopped) {
                tx_queue->stopped = 0;
                efx_wake_queue(tx_queue->channel);
        }
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET     0
#else
#define TSOH_OFFSET     NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)       ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)                                      \
        (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
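/* The resulting buffer layout is:
 * [struct efx_tso_header][TSOH_OFFSET padding][hdr_len header bytes]
 */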

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE           128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        unsigned ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        bool unmap_single;

        __be16 protocol;
        unsigned header_len;
        int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                /* Find the encapsulated protocol; reset network header
                 * and transport header based on that. */
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
                skb_set_network_header(skb, sizeof(*veh));
                if (protocol == htons(ETH_P_IP))
                        skb_set_transport_header(skb, sizeof(*veh) +
                                                 4 * ip_hdr(skb)->ihl);
                else if (protocol == htons(ETH_P_IPV6))
                        skb_set_transport_header(skb, sizeof(*veh) +
                                                 sizeof(struct ipv6hdr));
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
        struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;

        base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
        if (base_kva == NULL) {
                netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
                          "Unable to allocate page for TSO headers\n");
                return -ENOMEM;
        }

        /* pci_alloc_consistent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

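        /* Carve the page into PAGE_SIZE / TSOH_STD_SIZE fixed-size headers */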
        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
                tsoh = (struct efx_tso_header *)kva;
                tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
                tsoh->next = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh;
        }

        return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
                                struct pci_dev *pci_dev)
{
        struct efx_tso_header **p;
        unsigned long base_kva;
        dma_addr_t base_dma;

        base_kva = (unsigned long)tsoh & PAGE_MASK;
        base_dma = tsoh->dma_addr & PAGE_MASK;

        p = &tx_queue->tso_headers_free;
        while (*p != NULL) {
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;
        }

        pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
        struct efx_tso_header *tsoh;

        tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
        if (unlikely(!tsoh))
                return NULL;

        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
                                           tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }

        tsoh->unmap_len = header_len;
        return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
        pci_unmap_single(tx_queue->efx->pci_dev,
                         tsoh->dma_addr, tsoh->unmap_len,
                         PCI_DMA_TODEVICE);
        kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:           Efx TX queue
 * @dma_addr:           DMA address of fragment
 * @len:                Length of fragment
 * @final_buffer:       The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                               dma_addr_t dma_addr, unsigned len,
                               struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, fill_level, insert_ptr;
        int q_space;

        EFX_BUG_ON_PARANOID(len <= 0);

        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        /* -1 as there is no way to represent all descriptors used */
        q_space = efx->txq_entries - 1 - fill_level;

        while (1) {
                if (unlikely(q_space-- <= 0)) {
                        /* It might be that completions have happened
                         * since the xmit path last checked.  Update
                         * the xmit path's copy of read_count.
                         */
                        ++tx_queue->stopped;
                        /* This memory barrier protects the change of
                         * stopped from the access of read_count. */
                        smp_mb();
                        tx_queue->old_read_count =
                                ACCESS_ONCE(tx_queue->read_count);
                        fill_level = (tx_queue->insert_count
                                      - tx_queue->old_read_count);
                        q_space = efx->txq_entries - 1 - fill_level;
                        if (unlikely(q_space-- <= 0)) {
                                *final_buffer = NULL;
                                return 1;
                        }
                        smp_mb();
                        --tx_queue->stopped;
                }

                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);

                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->skb);
                EFX_BUG_ON_PARANOID(!buffer->continuation);
                EFX_BUG_ON_PARANOID(buffer->tsoh);

                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len; /* Don't set the other members */
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
        return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh, unsigned len)
{
        struct efx_tx_buffer *buffer;

        buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        efx_tsoh_free(tx_queue, buffer);
        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
        EFX_BUG_ON_PARANOID(buffer->skb);
        EFX_BUG_ON_PARANOID(!buffer->continuation);
        EFX_BUG_ON_PARANOID(buffer->tsoh);
        buffer->len = len;
        buffer->dma_addr = tsoh->dma_addr;
        buffer->tsoh = tsoh;

        ++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        dma_addr_t unmap_addr;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->skb);
                if (buffer->unmap_len) {
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
                        if (buffer->unmap_single)
                                pci_unmap_single(tx_queue->efx->pci_dev,
                                                 unmap_addr, buffer->unmap_len,
                                                 PCI_DMA_TODEVICE);
                        else
                                pci_unmap_page(tx_queue->efx->pci_dev,
                                               unmap_addr, buffer->unmap_len,
                                               PCI_DMA_TODEVICE);
                        buffer->unmap_len = 0;
                }
                buffer->len = 0;
                buffer->continuation = true;
        }
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
        /* All ethernet/IP/TCP headers combined size is TCP header size
         * plus offset of TCP header relative to start of packet.
         */
        st->header_len = ((tcp_hdr(skb)->doff << 2u)
                          + PTR_DIFF(tcp_hdr(skb), skb->data));
        st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
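        /* Each output segment is the headers plus up to gso_size bytes
         * of payload */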

        if (st->protocol == htons(ETH_P_IP))
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        else
                st->ipv4_id = 0;
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->packet_space = st->full_packet_size;
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
        st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
                                      frag->page_offset, frag->size,
                                      PCI_DMA_TODEVICE);
        if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
                st->unmap_single = false;
                st->unmap_len = frag->size;
                st->in_len = frag->size;
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
                                 const struct sk_buff *skb)
{
        int hl = st->header_len;
        int len = skb_headlen(skb) - hl;

        st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
                                        len, PCI_DMA_TODEVICE);
        if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
                st->unmap_single = true;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 * @st:                 TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                         const struct sk_buff *skb,
                                         struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n, end_of_packet, rc;

        if (st->in_len == 0)
                return 0;
        if (st->packet_space == 0)
                return 0;

        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
        if (likely(rc == 0)) {
                if (st->out_len == 0)
                        /* Transfer ownership of the skb */
                        buffer->skb = skb;

                end_of_packet = st->out_len == 0 || st->packet_space == 0;
                buffer->continuation = !end_of_packet;

                if (st->in_len == 0) {
                        /* Transfer ownership of the pci mapping */
                        buffer->unmap_len = st->unmap_len;
                        buffer->unmap_single = st->unmap_single;
                        st->unmap_len = 0;
                }
        }

        st->dma_addr += n;
        return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 * @st:                 TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tso_header *tsoh;
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;

        /* Allocate a DMA-mapped header buffer. */
        if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
                if (tx_queue->tso_headers_free == NULL) {
                        if (efx_tsoh_block_alloc(tx_queue))
                                return -1;
                }
                EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
                tsoh = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh->next;
                tsoh->unmap_len = 0;
        } else {
                tx_queue->tso_long_headers++;
                tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
                if (unlikely(!tsoh))
                        return -1;
        }

        header = TSOH_BUFFER(tsoh);
        tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);

        tsoh_th->seq = htonl(st->seqnum);
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
                ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
                ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }

        if (st->protocol == htons(ETH_P_IP)) {
                struct iphdr *tsoh_iph =
                        (struct iphdr *)(header + SKB_IPV4_OFF(skb));

                tsoh_iph->tot_len = htons(ip_length);

                /* Linux leaves suitable gaps in the IP ID space for us to fill. */
                tsoh_iph->id = htons(st->ipv4_id);
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
                        (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

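                /* Unlike IPv4's tot_len, payload_len excludes the
                 * IPv6 header itself */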
                tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
        }

        st->packet_space = skb_shinfo(skb)->gso_size;
        ++tx_queue->tso_packets;

        /* Form a descriptor for this header. */
        efx_tso_put_header(tx_queue, tsoh, st->header_len);

        return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        int frag_i, rc, rc2 = NETDEV_TX_OK;
        struct tso_state state;

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        tso_start(&state, skb);

        /* Assume that skb header area contains exactly the headers, and
         * all payload is in the frag list.
         */
        if (skb_headlen(skb) == state.header_len) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto mem_err;
        } else {
                rc = tso_get_head_fragment(&state, efx, skb);
                if (rc)
                        goto mem_err;
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
                if (unlikely(rc))
                        goto stop;

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto mem_err;
                }

                /* Start at new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;

 mem_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "Out of memory for TSO headers, or PCI mapping error\n");
        dev_kfree_skb_any(skb);
        goto unwind;

 stop:
        rc2 = NETDEV_TX_BUSY;

        /* Stop the queue if it wasn't stopped before. */
        if (tx_queue->stopped == 1)
                efx_stop_queue(tx_queue->channel);

 unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.unmap_single)
                        pci_unmap_single(efx->pci_dev, state.unmap_addr,
                                         state.unmap_len, PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(efx->pci_dev, state.unmap_addr,
                                       state.unmap_len, PCI_DMA_TODEVICE);
        }

        efx_enqueue_unwind(tx_queue);
        return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
        unsigned i;

        if (tx_queue->buffer) {
                for (i = 0; i <= tx_queue->ptr_mask; ++i)
                        efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
        }

        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
                                    tx_queue->efx->pci_dev);
}