/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};
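
/* For reference (recalled from include/net/protocol.h of this era, so
 * treat as approximate): the udp_offload descriptor that tunnel drivers
 * register pairs a UDP destination port with a set of offload callbacks,
 * roughly:
 *
 *	struct udp_offload {
 *		__be16			 port;
 *		struct offload_callbacks callbacks;
 *	};
 */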
static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		/* Seed uh->check with the pseudo-header sum; the payload
		 * checksum is completed later from csum_start/csum_offset.
		 */
		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
					       IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}
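
/* Sketch of the CHECKSUM_PARTIAL contract set up above (illustrative,
 * not part of this file): a driver without checksum offload can finish
 * the sum in software with something like
 *
 *	int off = skb_checksum_start_offset(skb);
 *	__wsum csum = skb_checksum(skb, off, skb->len - off, 0);
 *	*(__sum16 *)(skb->data + off + skb->csum_offset) = csum_fold(csum);
 *
 * which is exactly the software fallback udp4_ufo_fragment() performs
 * below before segmenting.
 */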
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	int offset;

	/* UDP tunnel packets get their own segmentation helper. */
	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}
	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}
	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
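
/* A quick worked example of the gso_segs accounting above (numbers are
 * illustrative): with skb->len == 4200 bytes and gso_size == 1400,
 * DIV_ROUND_UP(4200, 1400) == 3, so the untrusted packet is counted as
 * three wire segments without actually being split here.
 */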
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);
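
/* Illustrative caller (hypothetical names, not symbols in this tree): a
 * UDP tunnel driver registers its port with this API along the lines of
 *
 *	static struct udp_offload my_tunnel_offload = {
 *		.port = htons(4789),	// hypothetical port value
 *		.callbacks = {
 *			.gro_receive  = my_tunnel_gro_receive,
 *			.gro_complete = my_tunnel_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(&my_tunnel_offload);
 */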
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
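
/* Design note: readers traverse the offload list under rcu_read_lock(),
 * so the node is freed via call_rcu() only after a grace period. A
 * hypothetical caller unhooks before tearing down its own state:
 *
 *	udp_del_offload(&my_tunnel_offload);
 *
 * and, strictly, should also let a grace period elapse before the
 * udp_offload struct itself is freed, since in-flight GRO may still
 * dereference it.
 */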
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;
	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;
	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}
	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Compare source and dest port in one 32-bit load: they
		 * are adjacent __be16 fields in struct udphdr.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}
	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
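
/* Informal sketch of how the pieces chain together: for each incoming
 * skb, the inet GRO layer calls udp_gro_receive(), which pulls the UDP
 * header and hands matching packets to the per-port gro_receive callback
 * (e.g. a tunnel driver's handler). When GRO flushes a merged chain,
 * udp_gro_complete() below rewrites uh->len for the aggregated packet
 * and lets the same port's gro_complete callback finish inner headers.
 */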
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive  =	udp_gro_receive,
		.gro_complete =	udp_gro_complete,
	},
};
int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
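
/* For context (recalled from net/ipv4/af_inet.c of this era, so treat as
 * approximate): inet_init() invokes this at boot, roughly
 *
 *	if (udpv4_offload_init() < 0)
 *		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
 *
 * which wires this net_offload into the inet offload table keyed by
 * IPPROTO_UDP.
 */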