/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>

#include "ip6_offload.h"
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!ops))
				break;
			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		/* pskb_may_pull() may have reallocated the header; reload. */
		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
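
/*
 * GSO send-check callback for ETH_P_IPV6: validate that the IPv6 header is
 * present, strip it and any extension headers, and defer to the upper-layer
 * offload's gso_send_check() (e.g. TCP/UDP).
 */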
static int ipv6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int err = -EINVAL;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	err = -EPROTONOSUPPORT;

	ops = rcu_dereference(inet6_offloads[
		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);

	if (likely(ops && ops->callbacks.gso_send_check)) {
		skb_reset_transport_header(skb);
		err = ops->callbacks.gso_send_check(skb);
	}

out:
	return err;
}
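
/*
 * Segment a GSO skb carrying an IPv6 header: hand the payload to the
 * upper-layer gso_segment() callback, then fix up payload_len in each
 * resulting segment and, for UFO, fill in the fragment header offsets.
 */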
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;
	bool tunnel;
	int nhoff;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE | SKB_GSO_IPIP | SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL | SKB_GSO_MPLS | SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	tunnel = SKB_GSO_CB(skb)->encap_level > 0;
	if (tunnel)
		features = skb->dev->hw_enc_features & netif_skb_features(skb);
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;

	/* Fix up the outer IPv6 header of every resulting segment. */
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		if (tunnel) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (!tunnel && proto == IPPROTO_UDP) {
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			if (skb->next != NULL)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
	}

out:
	return segs;
}
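
/*
 * GRO receive callback: match the incoming packet against flows already held
 * on the GRO list (same extension-header chain, addresses and flow label;
 * Traffic Class differences force a flush) and pass it to the upper-layer
 * gro_receive() for possible coalescing.
 */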
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int proto;
	__wsum csum;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = ipv6_hdr(p);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class. */
		if (nlen != skb_network_header_len(p) ||
		    (first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	csum = skb->csum;
	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));

	pp = ops->callbacks.gro_receive(head, skb);

	skb->csum = csum;

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
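
/*
 * GRO complete callback: restore payload_len of the merged packet and let
 * the upper-layer offload finish its own header fix-ups.
 */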
static int ipv6_gro_complete(struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	int err = -ENOSYS;

	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
				 sizeof(*iph));

	rcu_read_lock();
	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb);

out_unlock:
	rcu_read_unlock();

	return err;
}
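
/* Offload callbacks registered for ETH_P_IPV6 packets via dev_add_offload(). */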
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};
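
/*
 * The same GSO callbacks are reused for IPv6-in-IPv4 (SIT) tunnels; this is
 * registered for IPPROTO_IPV6 in the IPv4 offload table below.
 */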
static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
	},
};
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);

	return 0;
}

fs_initcall(ipv6_offload_init);