/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}
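
/* Example of the decision in ip6_finish_output(): on a route with a
 * 1500-byte MTU, a 3000-byte non-GSO skb (or any skb larger than a
 * conntrack-recorded frag_max_size) is diverted through ip6_fragment();
 * a GSO skb of the same length goes straight to ip6_finish_output2()
 * and is segmented later, by the device or the GSO layer.
 */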
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note : socket lock is not held for SYNACK packets, but might be modified
 * by calls to skb_set_owner_w() and ipv6_local_error(),
 * which are using proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		 * MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
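		/* For illustration: a routing header consuming opt_nflen = 24
		 * with no destination options (opt_flen = 0) on an Ethernet
		 * device (where LL_RESERVED_SPACE() is typically 16 after
		 * alignment) needs head_room = 24 + 40 + 16 = 80 bytes.
		 */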
		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
			 * it is safe to call in our context (socket lock not held)
			 */
			skb_set_owner_w(skb, (struct sock *)sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel, fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * the input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP we cannot do anything.
	 *	Defragmentation also would be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(dst),
					IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	 * send redirects to source routed frames.
	 * We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
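	/* IPV6_MIN_MTU is 1280 octets: RFC 2460 guarantees every IPv6 link
	 * can carry at least that much, so never advertise a smaller value
	 * in the Packet Too Big message below.
	 */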
	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);
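	/* e.g. an unfragmentable part of hlen = 40 (just the basic IPv6
	 * header) and mtu = 1500 leaves 1500 - 40 - 8 = 1452 octets of
	 * fragmentable payload per fragment, before the multiple-of-8
	 * rounding applied on the slow path below.
	 */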
	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;
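		/* frag_off carries the byte offset directly: the fragment
		 * offset lives in the upper 13 bits of the field, and since
		 * every offset used here is a multiple of 8, the byte value
		 * already has its low three bits clear for the reserved bits
		 * and the IP6_MF flag.
		 */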
		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		u8 *fragnexthdr_offset;

		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		 * then align the next start on an eight byte boundary
		 */
		if (len < left) {
			len &= ~7;
		}
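		/* e.g. with 2000 octets left and a data budget of mtu = 1452,
		 * len becomes 1452 & ~7 = 1448, so the next fragment starts
		 * on an 8-octet boundary as the fragment header requires.
		 */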
		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		fragnexthdr_offset = skb_network_header(frag);
		fragnexthdr_offset += prevhdr - skb_network_header(skb);
		*fragnexthdr_offset = NEXTHDR_FRAGMENT;

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
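
/* ip6_rt_check() returns 0 (route still usable) when the route is a /128
 * host route for fl_addr, or when fl_addr matches the socket's cached
 * last-used address; any non-zero result means ip6_sk_dst_check() below
 * must drop the cached dst and perform a fresh lookup.
 */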
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not connected
	 * case is not very simple. Take into account that
	 * we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (!dst)
		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int exthdrlen, int transhdrlen, int mtu,
			unsigned int flags, const struct flowi6 *fl6)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_set_network_header(skb, exthdrlen);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		if (flags & MSG_CONFIRM)
			skb_set_dst_pending_confirm(skb, 1);

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
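	/* e.g. mtu = 1500 and fragheaderlen = 40 give
	 * gso_size = (1500 - 40 - 8) & ~7 = 1448, so each fragment produced
	 * during segmentation carries up to 1448 octets of UDP payload.
	 */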
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
							 &fl6->daddr,
							 &fl6->saddr);

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
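
/* Worked example for the maxfraglen formula above: mtu = 1500 and
 * fragheaderlen = 40 give ((1500 - 40) & ~7) + 40 - 8 = 1456 + 32 = 1488,
 * i.e. the largest skb length whose payload still fits the MTU once the
 * 8-octet fragment header is inserted and remains a multiple of 8.
 */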
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6,
			     const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if ((((length + fragheaderlen) > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
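		/* If everything corked so far still fits one mtu-sized packet
		 * (and allfrag is not forced), aim for a single unfragmented
		 * packet; otherwise size this skb to maxfraglen so a fragment
		 * header can be inserted later.
		 */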
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags,
		    const struct sockcm_cookie *sockc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6, sockc);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
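
/* Sketch of the generic corked-send pattern built on ip6_append_data()
 * (real callers such as rawv6_sendmsg() wrap the push step in their own
 * protocol-specific helper):
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
 *			      &ipc6, &fl6, rt, msg->msg_flags, &sockc);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 */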
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel, fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}
void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     const struct sockcm_cookie *sockc)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6, sockc);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}