1 /****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2013 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference. */
11 #include <linux/pci.h>
12 #include <linux/tcp.h>
15 #include <linux/ipv6.h>
16 #include <linux/slab.h>
18 #include <linux/if_ether.h>
19 #include <linux/highmem.h>
20 #include "net_driver.h"
23 #include "workarounds.h"
24 #include "ef10_regs.h"
26 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
27 struct efx_tx_buffer *buffer,
28 unsigned int *pkts_compl,
29 unsigned int *bytes_compl)
31 if (buffer->unmap_len) {
32 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
33 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
35 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
36 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
39 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
41 buffer->unmap_len = 0;
44 if (buffer->flags & EFX_TX_BUF_SKB) {
46 (*bytes_compl) += buffer->skb->len;
47 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
48 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
49 "TX queue %d transmission id %x complete\n",
50 tx_queue->queue, tx_queue->read_count);
51 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
52 kfree(buffer->heap_buf);
59 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
struct sk_buff *skb);
62 static inline unsigned
63 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
65 /* Depending on the NIC revision, we can use descriptor
66 * lengths up to 8K or 8K-1. However, since PCI Express
67 * devices must split read requests at 4K boundaries, there is
68 * little benefit from using descriptors that cross those
69 * boundaries and we keep things simple by not doing so. */
71 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
73 /* Work around hardware bug for unaligned buffers. */
74 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
75 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
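/* Worked example (illustrative sketch, assuming EFX_PAGE_SIZE is 4096): for
 * a buffer whose DMA address ends in 0xf40,
 *	len = (~0xf40 & 0xfff) + 1 = 0xc0 = 192 bytes,
 * which is exactly the distance to the next 4K boundary, so no single
 * descriptor ever spans a PCIe read-request boundary.
 */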
80 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
82 /* Header and payload descriptor for each output segment, plus
83 * one for every input fragment boundary within a segment. */
85 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
87 /* Possibly one more per segment for the alignment workaround,
88 * or for option descriptors. */
90 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
91 max_descs += EFX_TSO_MAX_SEGS;
93 /* Possibly more for PCIe page boundaries within input fragments */
94 if (PAGE_SIZE > EFX_PAGE_SIZE)
95 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
96 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
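/* Worked example (illustrative sketch, assuming EFX_TSO_MAX_SEGS is 100,
 * MAX_SKB_FRAGS is 17 and GSO_MAX_SIZE is 65536): the baseline budget is
 * 100 * 2 + 17 = 217 descriptors; the workaround/option-descriptor case
 * adds another 100, and a host page size above EFX_PAGE_SIZE adds at most
 * max(17, DIV_ROUND_UP(65536, 4096)) = 17 more, for 334 in the worst case.
 */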
101 /* Get partner of a TX queue, seen as part of the same net core queue */
102 static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
104 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
105 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
107 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
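/* Layout sketch (assuming EFX_TXQ_TYPE_OFFLOAD is bit 0 of the queue
 * number, as defined in net_driver.h): each channel owns EFX_TXQ_TYPES
 * consecutive TX queues, and a queue with the offload bit clear pairs with
 * the queue immediately above it, e.g.
 *
 *	csum_txq = efx_tx_queue_partner(no_csum_txq);
 *
 * returns the checksum-offload twin of a non-offload queue and vice versa.
 */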
110 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
112 /* We need to consider both queues that the net core sees as one */
113 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
114 struct efx_nic *efx = txq1->efx;
115 unsigned int fill_level;
117 fill_level = max(txq1->insert_count - txq1->old_read_count,
118 txq2->insert_count - txq2->old_read_count);
119 if (likely(fill_level < efx->txq_stop_thresh))
122 /* We used the stale old_read_count above, which gives us a
123 * pessimistic estimate of the fill level (which may even
124 * validly be >= efx->txq_entries). Now try again using
125 * read_count (more likely to be a cache miss).
127 * If we read read_count and then conditionally stop the
128 * queue, it is possible for the completion path to race with
129 * us and complete all outstanding descriptors in the middle,
130 * after which there will be no more completions to wake it.
131 * Therefore we stop the queue first, then read read_count
132 * (with a memory barrier to ensure the ordering), then
133 * restart the queue if the fill level turns out to be low enough. */
136 netif_tx_stop_queue(txq1->core_txq);
smp_mb();
138 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
139 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
141 fill_level = max(txq1->insert_count - txq1->old_read_count,
142 txq2->insert_count - txq2->old_read_count);
143 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
144 if (likely(fill_level < efx->txq_stop_thresh)) {
146 if (likely(!efx->loopback_selftest))
147 netif_tx_start_queue(txq1->core_txq);
152 * Add a socket buffer to a TX queue
154 * This maps all fragments of a socket buffer for DMA and adds them to
155 * the TX queue. The queue's insert pointer will be incremented by
156 * the number of fragments in the socket buffer.
158 * If any DMA mapping fails, the fragments mapped so far are unmapped and
159 * the queue's insert pointer is restored to its original value.
161 * This function is split out from efx_hard_start_xmit to allow the
162 * loopback test to direct packets via specific TX queues.
164 * Returns NETDEV_TX_OK.
165 * You must hold netif_tx_lock() to call this function. */
167 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
169 struct efx_nic *efx = tx_queue->efx;
170 struct device *dma_dev = &efx->pci_dev->dev;
171 struct efx_tx_buffer *buffer;
172 skb_frag_t *fragment;
173 unsigned int len, unmap_len = 0, insert_ptr;
174 dma_addr_t dma_addr, unmap_addr = 0;
175 unsigned int dma_len;
176 unsigned short dma_flags;
179 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
181 if (skb_shinfo(skb)->gso_size)
182 return efx_enqueue_skb_tso(tx_queue, skb);
184 /* Get size of the initial fragment */
185 len = skb_headlen(skb);
187 /* Pad if necessary */
188 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
189 EFX_BUG_ON_PARANOID(skb->data_len);
191 if (skb_pad(skb, len - skb->len))
195 /* Map for DMA. Use dma_map_single rather than dma_map_page
196 * since this is more efficient on machines with sparse memory. */
199 dma_flags = EFX_TX_BUF_MAP_SINGLE;
200 dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
202 /* Process all fragments */
204 if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
207 /* Store fields for marking in the per-fragment final descriptor */
210 unmap_addr = dma_addr;
212 /* Add to TX queue, splitting across DMA boundaries */
214 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
215 buffer = &tx_queue->buffer[insert_ptr];
216 EFX_BUG_ON_PARANOID(buffer->flags);
217 EFX_BUG_ON_PARANOID(buffer->len);
218 EFX_BUG_ON_PARANOID(buffer->unmap_len);
220 dma_len = efx_max_tx_len(efx, dma_addr);
221 if (likely(dma_len >= len))
224 /* Fill out per descriptor fields */
225 buffer->len = dma_len;
226 buffer->dma_addr = dma_addr;
227 buffer->flags = EFX_TX_BUF_CONT;
230 ++tx_queue->insert_count;
233 /* Transfer ownership of the unmapping to the final buffer */
234 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
235 buffer->unmap_len = unmap_len;
238 /* Get address and size of next fragment */
239 if (i >= skb_shinfo(skb)->nr_frags)
241 fragment = &skb_shinfo(skb)->frags[i];
242 len = skb_frag_size(fragment);
246 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
250 /* Transfer ownership of the skb to the final buffer */
252 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
254 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
256 /* Pass off to hardware */
257 efx_nic_push_buffers(tx_queue);
259 efx_tx_maybe_stop_queue(tx_queue);
264 netif_err(efx, tx_err, efx->net_dev,
265 "TX queue %d could not map skb with %d bytes %d fragments for DMA\n",
266 tx_queue->queue, skb->len,
267 skb_shinfo(skb)->nr_frags + 1);
269 /* Mark the packet as transmitted, and free the SKB ourselves */
270 dev_kfree_skb_any(skb);
272 /* Work backwards until we hit the original insert pointer value */
273 while (tx_queue->insert_count != tx_queue->write_count) {
274 unsigned int pkts_compl = 0, bytes_compl = 0;
275 --tx_queue->insert_count;
276 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
277 buffer = &tx_queue->buffer[insert_ptr];
278 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
281 /* Free the fragment we were mid-way through pushing */
283 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
284 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
287 dma_unmap_page(dma_dev, unmap_addr, unmap_len,
294 /* Remove packets from the TX queue
296 * This removes packets from the TX queue, up to and including the buffer at the specified index. */
299 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
301 unsigned int *pkts_compl,
302 unsigned int *bytes_compl)
304 struct efx_nic *efx = tx_queue->efx;
305 unsigned int stop_index, read_ptr;
307 stop_index = (index + 1) & tx_queue->ptr_mask;
308 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
310 while (read_ptr != stop_index) {
311 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
313 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
314 unlikely(buffer->len == 0)) {
315 netif_err(efx, tx_err, efx->net_dev,
316 "TX queue %d spurious TX completion id %x\n",
317 tx_queue->queue, read_ptr);
318 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
322 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
324 ++tx_queue->read_count;
325 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
329 /* Initiate a packet transmission. We use one channel per CPU
330 * (sharing when we have more CPUs than channels). On Falcon, the TX
331 * completion events will be directed back to the CPU that transmitted
332 * the packet, which should be cache-efficient.
334 * Context: non-blocking.
335 * Note that returning anything other than NETDEV_TX_OK will cause the
336 * OS to free the skb. */
338 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
339 struct net_device *net_dev)
341 struct efx_nic *efx = netdev_priv(net_dev);
342 struct efx_tx_queue *tx_queue;
343 unsigned index, type;
345 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
347 /* PTP "event" packet */
348 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
349 unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
350 return efx_ptp_tx(efx, skb);
353 index = skb_get_queue_mapping(skb);
354 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
355 if (index >= efx->n_tx_channels) {
356 index -= efx->n_tx_channels;
357 type |= EFX_TXQ_TYPE_HIGHPRI;
359 tx_queue = efx_get_tx_queue(efx, index, type);
361 return efx_enqueue_skb(tx_queue, skb);
364 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
366 struct efx_nic *efx = tx_queue->efx;
368 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
370 netdev_get_tx_queue(efx->net_dev,
371 tx_queue->queue / EFX_TXQ_TYPES +
372 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
373 efx->n_tx_channels : 0));
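/* Round-trip sketch (assuming EFX_TXQ_TYPES is 4 and EFX_TXQ_TYPE_HIGHPRI
 * is bit 1, as in net_driver.h): efx_hard_start_xmit() maps core queue
 * index i < n_tx_channels to channel i, and i >= n_tx_channels to channel
 * i - n_tx_channels with the HIGHPRI type bit set. Dividing
 * tx_queue->queue by EFX_TXQ_TYPES recovers the channel, and adding
 * n_tx_channels back for HIGHPRI queues recovers i, so the two mappings
 * are exact inverses.
 */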
376 int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
378 struct efx_nic *efx = netdev_priv(net_dev);
379 struct efx_channel *channel;
380 struct efx_tx_queue *tx_queue;
384 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
387 if (num_tc == net_dev->num_tc)
390 for (tc = 0; tc < num_tc; tc++) {
391 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
392 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
395 if (num_tc > net_dev->num_tc) {
396 /* Initialise high-priority queues as necessary */
397 efx_for_each_channel(channel, efx) {
398 efx_for_each_possible_channel_tx_queue(tx_queue,
400 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
402 if (!tx_queue->buffer) {
403 rc = efx_probe_tx_queue(tx_queue);
407 if (!tx_queue->initialised)
408 efx_init_tx_queue(tx_queue);
409 efx_init_tx_queue_core_txq(tx_queue);
413 /* Reduce number of classes before number of queues */
414 net_dev->num_tc = num_tc;
417 rc = netif_set_real_num_tx_queues(net_dev,
418 max_t(int, num_tc, 1) *
423 /* Do not destroy high-priority queues when they become
424 * unused. We would have to flush them first, and it is
425 * fairly difficult to flush a subset of TX queues. Leave
426 * it to efx_fini_channels(). */
429 net_dev->num_tc = num_tc;
433 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
436 struct efx_nic *efx = tx_queue->efx;
437 struct efx_tx_queue *txq2;
438 unsigned int pkts_compl = 0, bytes_compl = 0;
440 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
442 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
443 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
446 ++tx_queue->merge_events;
448 /* See if we need to restart the netif queue. This memory
449 * barrier ensures that we write read_count (inside
450 * efx_dequeue_buffers()) before reading the queue status. */
smp_mb();
453 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
454 likely(efx->port_enabled) &&
455 likely(netif_device_present(efx->net_dev))) {
456 txq2 = efx_tx_queue_partner(tx_queue);
457 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
458 txq2->insert_count - txq2->read_count);
459 if (fill_level <= efx->txq_wake_thresh)
460 netif_tx_wake_queue(tx_queue->core_txq);
463 /* Check whether the hardware queue is now empty */
464 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
465 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
466 if (tx_queue->read_count == tx_queue->old_write_count) {
468 tx_queue->empty_read_count =
469 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
474 /* Size of page-based TSO header buffers. Larger blocks must be
475 * allocated from the heap. */
477 #define TSOH_STD_SIZE 128
478 #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
480 /* At most half the descriptors in the queue at any time will refer to
481 * a TSO header buffer, since they must always be followed by a
482 * payload descriptor referring to an skb. */
484 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
486 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
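/* Worked example (illustrative sketch, assuming 4K pages): TSOH_PER_PAGE
 * is 4096 / 128 = 32 headers per page, so a 1024-entry ring needs
 * DIV_ROUND_UP(1024, 2 * 32) = 16 header pages.
 */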
489 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
491 struct efx_nic *efx = tx_queue->efx;
492 unsigned int entries;
495 /* Create the smallest power-of-two sized ring that will hold the entries */
496 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
497 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
498 tx_queue->ptr_mask = entries - 1;
500 netif_dbg(efx, probe, efx->net_dev,
501 "creating TX queue %d size %#x mask %#x\n",
502 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
504 /* Allocate software ring */
505 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
507 if (!tx_queue->buffer)
510 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
511 tx_queue->tsoh_page =
512 kcalloc(efx_tsoh_page_count(tx_queue),
513 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
514 if (!tx_queue->tsoh_page) {
520 /* Allocate hardware ring */
521 rc = efx_nic_probe_tx(tx_queue);
528 kfree(tx_queue->tsoh_page);
529 tx_queue->tsoh_page = NULL;
531 kfree(tx_queue->buffer);
532 tx_queue->buffer = NULL;
536 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
538 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
539 "initialising TX queue %d\n", tx_queue->queue);
541 tx_queue->insert_count = 0;
542 tx_queue->write_count = 0;
543 tx_queue->old_write_count = 0;
544 tx_queue->read_count = 0;
545 tx_queue->old_read_count = 0;
546 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
548 /* Set up TX descriptor ring */
549 efx_nic_init_tx(tx_queue);
551 tx_queue->initialised = true;
554 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
556 struct efx_tx_buffer *buffer;
558 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
559 "shutting down TX queue %d\n", tx_queue->queue);
561 if (!tx_queue->buffer)
564 /* Free any buffers left in the ring */
565 while (tx_queue->read_count != tx_queue->write_count) {
566 unsigned int pkts_compl = 0, bytes_compl = 0;
567 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
568 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
570 ++tx_queue->read_count;
572 netdev_tx_reset_queue(tx_queue->core_txq);
575 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
579 if (!tx_queue->buffer)
582 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
583 "destroying TX queue %d\n", tx_queue->queue);
584 efx_nic_remove_tx(tx_queue);
586 if (tx_queue->tsoh_page) {
587 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
588 efx_nic_free_buffer(tx_queue->efx,
589 &tx_queue->tsoh_page[i]);
590 kfree(tx_queue->tsoh_page);
591 tx_queue->tsoh_page = NULL;
594 kfree(tx_queue->buffer);
595 tx_queue->buffer = NULL;
599 /* Efx TCP segmentation acceleration.
601 * Why? Because by doing it here in the driver we can go significantly
602 * faster than the kernel's generic software GSO path.
604 * Requires TX checksum offload support.
607 /* Number of bytes inserted at the start of a TSO header buffer,
608 * similar to NET_IP_ALIGN. */
610 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
611 #define TSOH_OFFSET 0
#else
613 #define TSOH_OFFSET NET_IP_ALIGN
#endif
616 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
619 * struct tso_state - TSO state for an SKB
620 * @out_len: Remaining length in current segment
621 * @seqnum: Current sequence number
622 * @ipv4_id: Current IPv4 ID, host endian
623 * @packet_space: Remaining space in current packet
624 * @dma_addr: DMA address of current position
625 * @in_len: Remaining length in current SKB fragment
626 * @unmap_len: Length of SKB fragment
627 * @unmap_addr: DMA address of SKB fragment
628 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
629 * @protocol: Network protocol (after any VLAN header)
630 * @ip_off: Offset of IP header
631 * @tcp_off: Offset of TCP header
632 * @header_len: Number of bytes of header
633 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
634 * @header_dma_addr: Header DMA address, when using option descriptors
635 * @header_unmap_len: Header DMA mapped length, or 0 if not using option descriptors
638 * The state used during segmentation. It is put into this data structure
639 * just to make it easy to pass into inline functions.
642 /* Output position */
646 unsigned packet_space;
652 dma_addr_t unmap_addr;
653 unsigned short dma_flags;
657 unsigned int tcp_off;
659 unsigned int ip_base_len;
660 dma_addr_t header_dma_addr;
661 unsigned int header_unmap_len;
666 * Verify that our various assumptions about sk_buffs and the conditions
667 * under which TSO will be attempted hold true. Return the protocol number.
669 static __be16 efx_tso_check_protocol(struct sk_buff *skb)
671 __be16 protocol = skb->protocol;
673 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
675 if (protocol == htons(ETH_P_8021Q)) {
676 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
677 protocol = veh->h_vlan_encapsulated_proto;
680 if (protocol == htons(ETH_P_IP)) {
681 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
683 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
684 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
686 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
687 + (tcp_hdr(skb)->doff << 2u)) >
693 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
694 struct efx_tx_buffer *buffer, unsigned int len)
698 EFX_BUG_ON_PARANOID(buffer->len);
699 EFX_BUG_ON_PARANOID(buffer->flags);
700 EFX_BUG_ON_PARANOID(buffer->unmap_len);
702 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
704 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
705 struct efx_buffer *page_buf =
706 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
708 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
710 if (unlikely(!page_buf->addr) &&
711 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
715 result = (u8 *)page_buf->addr + offset;
716 buffer->dma_addr = page_buf->dma_addr + offset;
717 buffer->flags = EFX_TX_BUF_CONT;
719 tx_queue->tso_long_headers++;
721 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
722 if (unlikely(!buffer->heap_buf))
724 result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
725 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
734 * efx_tx_queue_insert - push descriptors onto the TX queue
735 * @tx_queue: Efx TX queue
736 * @dma_addr: DMA address of fragment
737 * @len: Length of fragment
738 * @final_buffer: The final buffer inserted into the queue
740 * Push descriptors onto the TX queue. */
742 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
743 dma_addr_t dma_addr, unsigned len,
744 struct efx_tx_buffer **final_buffer)
746 struct efx_tx_buffer *buffer;
747 struct efx_nic *efx = tx_queue->efx;
748 unsigned dma_len, insert_ptr;
750 EFX_BUG_ON_PARANOID(len <= 0);
753 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
754 buffer = &tx_queue->buffer[insert_ptr];
755 ++tx_queue->insert_count;
757 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
758 tx_queue->read_count >=
761 EFX_BUG_ON_PARANOID(buffer->len);
762 EFX_BUG_ON_PARANOID(buffer->unmap_len);
763 EFX_BUG_ON_PARANOID(buffer->flags);
765 buffer->dma_addr = dma_addr;
767 dma_len = efx_max_tx_len(efx, dma_addr);
769 /* If there is enough space to send then do so */
773 buffer->len = dma_len;
774 buffer->flags = EFX_TX_BUF_CONT;
779 EFX_BUG_ON_PARANOID(!len);
781 *final_buffer = buffer;
786 * Put a TSO header into the TX queue.
788 * This is special-cased because we know that it is small enough to fit in
789 * a single fragment, and we know it doesn't cross a page boundary. It
790 * also allows us not to worry about end-of-packet handling, etc. */
792 static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
793 struct efx_tx_buffer *buffer, u8 *header)
795 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
796 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
799 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
800 buffer->dma_addr))) {
801 kfree(buffer->heap_buf);
806 buffer->unmap_len = buffer->len;
807 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
810 ++tx_queue->insert_count;
815 /* Remove buffers put into a tx_queue. None of them may have an skb attached. */
818 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
820 struct efx_tx_buffer *buffer;
822 /* Work backwards until we hit the original insert pointer value */
823 while (tx_queue->insert_count != tx_queue->write_count) {
824 --tx_queue->insert_count;
825 buffer = &tx_queue->buffer[tx_queue->insert_count &
827 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
832 /* Parse the SKB header and initialise state. */
833 static int tso_start(struct tso_state *st, struct efx_nic *efx,
834 const struct sk_buff *skb)
836 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
837 struct device *dma_dev = &efx->pci_dev->dev;
838 unsigned int header_len, in_len;
841 st->ip_off = skb_network_header(skb) - skb->data;
842 st->tcp_off = skb_transport_header(skb) - skb->data;
843 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
844 in_len = skb_headlen(skb) - header_len;
845 st->header_len = header_len;
847 if (st->protocol == htons(ETH_P_IP)) {
848 st->ip_base_len = st->header_len - st->ip_off;
849 st->ipv4_id = ntohs(ip_hdr(skb)->id);
851 st->ip_base_len = st->header_len - st->tcp_off;
854 st->seqnum = ntohl(tcp_hdr(skb)->seq);
856 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
857 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
858 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
860 st->out_len = skb->len - header_len;
863 st->header_unmap_len = 0;
865 if (likely(in_len == 0)) {
871 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
872 in_len, DMA_TO_DEVICE);
873 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
874 st->dma_addr = dma_addr;
875 st->unmap_addr = dma_addr;
876 st->unmap_len = in_len;
878 dma_addr = dma_map_single(dma_dev, skb->data,
879 skb_headlen(skb), DMA_TO_DEVICE);
880 st->header_dma_addr = dma_addr;
881 st->header_unmap_len = skb_headlen(skb);
883 st->dma_addr = dma_addr + header_len;
887 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
890 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
893 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
894 skb_frag_size(frag), DMA_TO_DEVICE);
895 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
897 st->unmap_len = skb_frag_size(frag);
898 st->in_len = skb_frag_size(frag);
899 st->dma_addr = st->unmap_addr;
907 * tso_fill_packet_with_fragment - form descriptors for the current fragment
908 * @tx_queue: Efx TX queue
909 * @skb: Socket buffer
912 * Form descriptors for the current fragment, until we reach the end
913 * of fragment or end-of-packet.
915 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
916 const struct sk_buff *skb,
917 struct tso_state *st)
919 struct efx_tx_buffer *buffer;
924 if (st->packet_space == 0)
927 EFX_BUG_ON_PARANOID(st->in_len <= 0);
928 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
930 n = min(st->in_len, st->packet_space);
932 st->packet_space -= n;
936 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
938 if (st->out_len == 0) {
939 /* Transfer ownership of the skb */
941 buffer->flags = EFX_TX_BUF_SKB;
942 } else if (st->packet_space != 0) {
943 buffer->flags = EFX_TX_BUF_CONT;
946 if (st->in_len == 0) {
947 /* Transfer ownership of the DMA mapping */
948 buffer->unmap_len = st->unmap_len;
949 buffer->flags |= st->dma_flags;
958 * tso_start_new_packet - generate a new header and prepare for the new packet
959 * @tx_queue: Efx TX queue
960 * @skb: Socket buffer
963 * Generate a new header and prepare for the new packet. Return 0 on
964 * success, or -%ENOMEM if the header buffer could not be allocated. */
966 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
967 const struct sk_buff *skb,
968 struct tso_state *st)
970 struct efx_tx_buffer *buffer =
971 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
972 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
976 st->packet_space = skb_shinfo(skb)->gso_size;
977 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
979 st->packet_space = st->out_len;
980 tcp_flags_clear = 0x00;
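/* For reference: byte 13 of the TCP header holds the flag bits
 * CWR ECE URG ACK PSH RST SYN FIN, so 0x09 clears PSH (0x08) and FIN
 * (0x01) on every segment except the last, leaving those flags only on
 * the final segment of the original skb.
 */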
983 if (!st->header_unmap_len) {
984 /* Allocate and insert a DMA-mapped header buffer. */
985 struct tcphdr *tsoh_th;
990 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
994 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
996 /* Copy and update the headers. */
997 memcpy(header, skb->data, st->header_len);
999 tsoh_th->seq = htonl(st->seqnum);
1000 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1002 ip_length = st->ip_base_len + st->packet_space;
1004 if (st->protocol == htons(ETH_P_IP)) {
1005 struct iphdr *tsoh_iph =
1006 (struct iphdr *)(header + st->ip_off);
1008 tsoh_iph->tot_len = htons(ip_length);
1009 tsoh_iph->id = htons(st->ipv4_id);
1011 struct ipv6hdr *tsoh_iph =
1012 (struct ipv6hdr *)(header + st->ip_off);
1014 tsoh_iph->payload_len = htons(ip_length);
1017 rc = efx_tso_put_header(tx_queue, buffer, header);
1021 /* Send the original headers, preceded by a TSO option descriptor */
1024 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
1026 buffer->flags = EFX_TX_BUF_OPTION;
1028 buffer->unmap_len = 0;
1029 EFX_POPULATE_QWORD_5(buffer->option,
1030 ESF_DZ_TX_DESC_IS_OPT, 1,
1031 ESF_DZ_TX_OPTION_TYPE,
1032 ESE_DZ_TX_OPTION_DESC_TSO,
1033 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1034 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1035 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1036 ++tx_queue->insert_count;
1038 /* We mapped the headers in tso_start(). Unmap them
1039 * when the last segment is completed.
1041 buffer = &tx_queue->buffer[tx_queue->insert_count &
1042 tx_queue->ptr_mask];
1043 buffer->dma_addr = st->header_dma_addr;
1044 buffer->len = st->header_len;
1046 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1047 buffer->unmap_len = st->header_unmap_len;
1048 /* Ensure we only unmap them once in case of a
1049 * later DMA mapping error and rollback
1051 st->header_unmap_len = 0;
1053 buffer->flags = EFX_TX_BUF_CONT;
1054 buffer->unmap_len = 0;
1056 ++tx_queue->insert_count;
1059 st->seqnum += skb_shinfo(skb)->gso_size;
1061 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1064 ++tx_queue->tso_packets;
1071 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1072 * @tx_queue: Efx TX queue
1073 * @skb: Socket buffer
1075 * Context: You must hold netif_tx_lock() to call this function.
1077 * Add socket buffer @skb to @tx_queue, performing TSO. @skb is always
1078 * consumed, and %NETDEV_TX_OK is returned even if an error occurs. */
1081 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1082 struct sk_buff *skb)
1084 struct efx_nic *efx = tx_queue->efx;
1086 struct tso_state state;
1088 /* Find the packet protocol and sanity-check it */
1089 state.protocol = efx_tso_check_protocol(skb);
1091 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1093 rc = tso_start(&state, efx, skb);
1097 if (likely(state.in_len == 0)) {
1098 /* Grab the first payload fragment. */
1099 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1101 rc = tso_get_fragment(&state, efx,
1102 skb_shinfo(skb)->frags + frag_i);
1106 /* Payload starts in the header area. */
1110 if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1114 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1116 /* Move on to the next fragment? */
1117 if (state.in_len == 0) {
1118 if (++frag_i >= skb_shinfo(skb)->nr_frags)
1119 /* End of payload reached. */
1121 rc = tso_get_fragment(&state, efx,
1122 skb_shinfo(skb)->frags + frag_i);
1127 /* Start a new packet? */
1128 if (state.packet_space == 0 &&
1129 tso_start_new_packet(tx_queue, skb, &state) < 0)
1133 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1135 /* Pass off to hardware */
1136 efx_nic_push_buffers(tx_queue);
1138 efx_tx_maybe_stop_queue(tx_queue);
1140 tx_queue->tso_bursts++;
1141 return NETDEV_TX_OK;
1144 netif_err(efx, tx_err, efx->net_dev,
1145 "Out of memory for TSO headers, or DMA mapping error\n");
1146 dev_kfree_skb_any(skb);
1148 /* Free the DMA mapping we were in the process of writing out */
1149 if (state.unmap_len) {
1150 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1151 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1152 state.unmap_len, DMA_TO_DEVICE);
1154 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
1155 state.unmap_len, DMA_TO_DEVICE);
1158 /* Free the header DMA mapping, if using option descriptors */
1159 if (state.header_unmap_len)
1160 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1161 state.header_unmap_len, DMA_TO_DEVICE);
1163 efx_enqueue_unwind(tx_queue);
1164 return NETDEV_TX_OK;