/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
        struct udp_offload      *offload;
        possible_net_t  net;
        struct rcu_head         rcu;
        struct udp_offload_priv __rcu *next;
};

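/* For reference, a sketch of the descriptor that consumers register, as
 * declared in include/net/protocol.h at this point in the tree (repeated
 * here only as a reading aid, not part of this file):
 *
 *      struct udp_offload_callbacks {
 *              struct sk_buff  **(*gro_receive)(struct sk_buff **head,
 *                                               struct sk_buff *skb,
 *                                               struct udp_offload *uoff);
 *              int             (*gro_complete)(struct sk_buff *skb,
 *                                              int nhoff,
 *                                              struct udp_offload *uoff);
 *      };
 *
 *      struct udp_offload {
 *              __be16  port;      matched against uh->dest, network order
 *              u8      ipproto;   stashed in NAPI_GRO_CB(skb)->proto
 *              struct udp_offload_callbacks callbacks;
 *      };
 */
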
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        netdev_features_t features,
        struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
                                             netdev_features_t features),
        __be16 new_protocol, bool is_ipv6)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        u16 mac_offset = skb->mac_header;
        int mac_len = skb->mac_len;
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
        __be16 protocol = skb->protocol;
        netdev_features_t enc_features;
        int udp_offset, outer_hlen;
        unsigned int oldlen;
        bool need_csum = !!(skb_shinfo(skb)->gso_type &
                            SKB_GSO_UDP_TUNNEL_CSUM);
        bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
        bool offload_csum = false, dont_encap = (need_csum || remcsum);

        oldlen = (u16)~skb->len;

        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;

        skb->encapsulation = 0;
        __skb_pull(skb, tnl_hlen);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
        skb->mac_len = skb_inner_network_offset(skb);
        skb->protocol = new_protocol;
        skb->encap_hdr_csum = need_csum;
        skb->remcsum_offload = remcsum;

        /* Try to offload checksum if possible */
        offload_csum = !!(need_csum &&
                          (skb->dev->features &
                           (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));

        /* segment inner packet. */
        enc_features = skb->dev->hw_enc_features & features;
        segs = gso_inner_segment(skb, enc_features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
                                     mac_len);
                goto out;
        }

        outer_hlen = skb_tnl_header_len(skb);
        udp_offset = outer_hlen - tnl_hlen;
        skb = segs;
        do {
                struct udphdr *uh;
                int len;
                __be32 delta;

                if (dont_encap) {
                        skb->encapsulation = 0;
                        skb->ip_summed = CHECKSUM_NONE;
                } else {
                        /* Only set up inner headers if we might be offloading
                         * inner checksum.
                         */
                        skb_reset_inner_headers(skb);
                        skb->encapsulation = 1;
                }

                skb->mac_len = mac_len;
                skb->protocol = protocol;

                skb_push(skb, outer_hlen);
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, mac_len);
                skb_set_transport_header(skb, udp_offset);
                len = skb->len - udp_offset;
                uh = udp_hdr(skb);
                uh->len = htons(len);

                if (!need_csum)
                        continue;

                delta = htonl(oldlen + len);

                uh->check = ~csum_fold((__force __wsum)
                                       ((__force u32)uh->check +
                                        (__force u32)delta));
                if (offload_csum) {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_start = skb_transport_header(skb) - skb->head;
                        skb->csum_offset = offsetof(struct udphdr, check);
                } else if (remcsum) {
                        /* Need to calculate the checksum from scratch,
                         * since the inner checksum is never offloaded
                         * when doing remote checksum offload.
                         */

                        skb->csum = skb_checksum(skb, udp_offset,
                                                 skb->len - udp_offset,
                                                 0);
                        uh->check = csum_fold(skb->csum);
                        if (uh->check == 0)
                                uh->check = CSUM_MANGLED_0;
                } else {
                        uh->check = gso_make_checksum(skb, ~uh->check);

                        if (uh->check == 0)
                                uh->check = CSUM_MANGLED_0;
                }
        } while ((skb = skb->next));
out:
        return segs;
}
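
/* A note on the length fix-up in the loop above: it follows the standard
 * incremental checksum update of RFC 1624 (eqn. 3),
 *
 *      check' = ~(~check + ~m + m')
 *
 * with m the old 16-bit length word and m' the new one.  oldlen was saved
 * as (u16)~skb->len before segmenting, so delta = htonl(oldlen + len) is
 * exactly (~m + m'), and the checksum never has to be rebuilt from
 * scratch for each segment.
 */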

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6)
{
        __be16 protocol = skb->protocol;
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
                                             netdev_features_t features);

        rcu_read_lock();

        switch (skb->inner_protocol_type) {
        case ENCAP_TYPE_ETHER:
                protocol = skb->inner_protocol;
                gso_inner_segment = skb_mac_gso_segment;
                break;
        case ENCAP_TYPE_IPPROTO:
                offloads = is_ipv6 ? inet6_offloads : inet_offloads;
                ops = rcu_dereference(offloads[skb->inner_ipproto]);
                if (!ops || !ops->callbacks.gso_segment)
                        goto out_unlock;
                gso_inner_segment = ops->callbacks.gso_segment;
                break;
        default:
                goto out_unlock;
        }

        segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
                                        protocol, is_ipv6);

out_unlock:
        rcu_read_unlock();

        return segs;
}
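
/* Which case runs above is decided by the transmitting tunnel driver,
 * which records the inner protocol before the skb reaches the GSO layer.
 * A minimal sketch using the helpers from include/linux/skbuff.h; the
 * function and the two sample values are hypothetical, not something
 * this file defines:
 */
static inline void example_mark_inner_protocol(struct sk_buff *skb,
                                               bool carries_ethernet)
{
        if (carries_ethernet)
                /* sets skb->inner_protocol_type = ENCAP_TYPE_ETHER */
                skb_set_inner_protocol(skb, htons(ETH_P_TEB));
        else
                /* sets skb->inner_protocol_type = ENCAP_TYPE_IPPROTO */
                skb_set_inner_ipproto(skb, IPPROTO_IPIP);
}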

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
        __wsum csum;
        struct udphdr *uh;
        struct iphdr *iph;

        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
             (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
                segs = skb_udp_tunnel_segment(skb, features, false);
                goto out;
        }

        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto out;

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_UDP_TUNNEL_CSUM |
                                      SKB_GSO_TUNNEL_REMCSUM |
                                      SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        /* Do software UFO.  Compute and fill in the UDP checksum here,
         * as hardware cannot checksum UDP packets that are sent as
         * multiple IP fragments.
         */

        uh = udp_hdr(skb);
        iph = ip_hdr(skb);

        uh->check = 0;
        csum = skb_checksum(skb, 0, skb->len, 0);
        uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment().
         */
        segs = skb_segment(skb, features);
out:
        return segs;
}

int udp_add_offload(struct net *net, struct udp_offload *uo)
{
        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

        if (!new_offload)
                return -ENOMEM;

        write_pnet(&new_offload->net, net);
        new_offload->offload = uo;

        spin_lock(&udp_offload_lock);
        new_offload->next = udp_offload_base;
        rcu_assign_pointer(udp_offload_base, new_offload);
        spin_unlock(&udp_offload_lock);

        return 0;
}
EXPORT_SYMBOL(udp_add_offload);

static void udp_offload_free_routine(struct rcu_head *head)
{
        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
        kfree(ou_priv);
}

void udp_del_offload(struct udp_offload *uo)
{
        struct udp_offload_priv __rcu **head = &udp_offload_base;
        struct udp_offload_priv *uo_priv;

        spin_lock(&udp_offload_lock);

        uo_priv = udp_deref_protected(*head);
        for (; uo_priv != NULL;
             uo_priv = udp_deref_protected(*head)) {
                if (uo_priv->offload == uo) {
                        rcu_assign_pointer(*head,
                                           udp_deref_protected(uo_priv->next));
                        goto unlock;
                }
                head = &uo_priv->next;
        }
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
        spin_unlock(&udp_offload_lock);
        if (uo_priv)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
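
/* Usage sketch for the pair above.  Everything named demo_* is
 * hypothetical and exists only to illustrate the registration contract;
 * it is modeled on, but is not, an in-tree user such as vxlan or fou.
 */
static struct sk_buff **demo_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb,
                                         struct udp_offload *uoff)
{
        return NULL;    /* a real driver would try to aggregate here */
}

static int demo_gro_complete(struct sk_buff *skb, int nhoff,
                             struct udp_offload *uoff)
{
        return 0;       /* a real driver would finish its header here */
}

static struct udp_offload demo_offload = {
        /* .port is filled in at open time; .ipproto only matters if the
         * callbacks dispatch on NAPI_GRO_CB(skb)->proto, as fou does.
         */
        .callbacks = {
                .gro_receive  = demo_gro_receive,
                .gro_complete = demo_gro_complete,
        },
};

static int demo_socket_open(struct net *net, __be16 port)
{
        /* port is in network byte order; it is matched against uh->dest */
        demo_offload.port = port;
        return udp_add_offload(net, &demo_offload);
}

static void demo_socket_close(void)
{
        /* the priv node is freed only after an RCU grace period */
        udp_del_offload(&demo_offload);
}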

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
                                 struct udphdr *uh)
{
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
        struct udphdr *uh2;
        unsigned int off = skb_gro_offset(skb);
        int flush = 1;

        if (NAPI_GRO_CB(skb)->udp_mark ||
            (skb->ip_summed != CHECKSUM_PARTIAL &&
             NAPI_GRO_CB(skb)->csum_cnt == 0 &&
             !NAPI_GRO_CB(skb)->csum_valid))
                goto out;

        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;

        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
                    uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_receive)
                        goto unflush;
        }
        goto out_unlock;

unflush:
        flush = 0;

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = (struct udphdr *)(p->data + off);

                /* Match on the source/dest port pair, and require that
                 * the checksums are either both zero or both nonzero.
                 */
                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
                    (!uh->check ^ !uh2->check)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
        pp = uo_priv->offload->callbacks.gro_receive(head, skb,
                                                     uo_priv->offload);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        return pp;
}

static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);

        if (unlikely(!uh))
                goto flush;

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (NAPI_GRO_CB(skb)->flush)
                goto skip;

        if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
                                                 inet_gro_compute_pseudo))
                goto flush;
        else if (uh->check)
                skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                             inet_gro_compute_pseudo);
skip:
        NAPI_GRO_CB(skb)->is_ipv6 = 0;
        return udp_gro_receive(head, skb, uh);

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}
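
/* Two UDP details keep the validation above cheap: a zero checksum is a
 * legal "no checksum" marker for UDP over IPv4, which is why the
 * zero-check variant of the validator is used, and for a nonzero,
 * software-verified checksum skb_gro_checksum_try_convert() records the
 * verified state so that encapsulated checksums can be validated later
 * without walking the payload again.
 */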

int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        uh->len = newlen;

        rcu_read_lock();

        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
                    uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_complete)
                        break;
        }

        if (uo_priv) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb,
                                nhoff + sizeof(struct udphdr),
                                uo_priv->offload);
        }

        rcu_read_unlock();

        if (skb->remcsum_offload)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

        skb->encapsulation = 1;
        skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));

        return err;
}

static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

        if (uh->check) {
                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
                                          iph->daddr, 0);
        } else {
                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
        }

        return udp_gro_complete(skb, nhoff);
}

static const struct net_offload udpv4_offload = {
        .callbacks = {
                .gso_segment  = udp4_ufo_fragment,
                .gro_receive  = udp4_gro_receive,
                .gro_complete = udp4_gro_complete,
        },
};

int __init udpv4_offload_init(void)
{
        return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
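
/* Hook-up note, stated as context from outside this file:
 * udpv4_offload_init() is called from inet_init() in net/ipv4/af_inet.c,
 * and inet_add_offload() installs udpv4_offload as the
 * inet_offloads[IPPROTO_UDP] entry that the IPv4 GRO/GSO core
 * dispatches to.
 */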