/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(sk, skb);
}
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
			    NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
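/*
 * Layering sketch (illustrative; not part of the original file). A
 * locally generated packet typically traverses:
 *
 *	ip6_xmit() / ip6_local_out()
 *	  -> NF_INET_LOCAL_OUT hook -> dst_output_sk() == ip6_output()
 *	    -> NF_INET_POST_ROUTING hook (skipped when IP6SKB_REROUTED)
 *	      -> ip6_finish_output()
 *	        -> ip6_fragment() when the skb exceeds the path MTU
 *	        -> ip6_finish_output2() for the neighbour transmit
 */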
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel, fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
			       NULL, dst->dev, dst_output_sk);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
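/*
 * Minimal usage sketch (illustrative; not part of the original file).
 * A transport that has already routed its flow and attached the dst to
 * the skb can hand a finished segment to ip6_xmit(). The skb needs
 * headroom for the IPv6 header (plus any extension headers), and sk is
 * assumed to be a full IPv6 socket since ip6_xmit() consults
 * inet6_sk(sk).
 */
static int __maybe_unused example_xmit_segment(struct sock *sk,
					       struct sk_buff *skb,
					       struct flowi6 *fl6)
{
	/* assumes skb_dst(skb) was set by an earlier route lookup */
	return ip6_xmit(sk, skb, fl6, NULL /* opt */, 0 /* tclass */);
}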
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
{
	skb_sender_cpu_clear(skb);
	return dst_output_sk(sk, skb);
}
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
					 IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
		       skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
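/*
 * Summary of the checks above (illustrative; not part of the original
 * file): forwarding enabled -> pkt_type is PACKET_HOST -> no LRO ->
 * xfrm FWD policy -> router-alert delivery -> hop limit > 1 -> NDP
 * proxy -> xfrm route -> redirect or source-address sanity -> MTU ->
 * skb_cow() + hop_limit decrement -> NF_INET_FORWARD hook.
 */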
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
int ip6_fragment(struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);
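	/* Worked example (illustrative): with a 1500 byte MTU and a 40 byte
	 * unfragmentable part (hlen), each fragment may carry up to
	 * 1500 - 40 - 8 = 1452 bytes of fragmentable data; non-final
	 * fragments are additionally rounded down to a multiple of 8
	 * (1448 here) so that fragment offsets stay representable.
	 */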
	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
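/*
 * Example (illustrative; not part of the original file): for a cached
 * host route (plen == 128) the helper above demands that the flow's
 * address equal the route key, so a dst cached for 2001:db8::1/128 is
 * reused only for that address; for a network route it falls back to
 * comparing against the socket's cached address (e.g. daddr_cache),
 * which is maintained only for connected sockets.
 */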
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not-connected case is not
	 * very simple. Take into account that we do not support routing
	 * by source, TOS, and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}
	}

	if (!*dst)
		*dst = ip6_route_output(net, sk, fl6);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: network namespace to perform the lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = dst->dev->ifindex;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
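/*
 * Minimal usage sketch (illustrative; not part of the original file).
 * Resolve a flow to a dst entry before building a packet. The caller
 * owns the returned reference and must dst_release() it when done.
 */
static int __maybe_unused example_route_flow(struct sock *sk,
					     const struct in6_addr *daddr)
{
	struct flowi6 fl6;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.daddr = *daddr;

	dst = ip6_dst_lookup_flow(sk, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* ... attach dst to an skb, transmit, ... */
	dst_release(dst);
	return 0;
}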
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			const struct flowi6 *fl6)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
							 &fl6->daddr,
							 &fl6->saddr);

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
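/* Worked example (illustrative): with mtu = 1500 and fragheaderlen = 40,
 * gso_size = (1500 - 40 - 8) & ~7 = 1448, so UFO emits fragments whose
 * fragmentable payload is a multiple of 8, as the fragment header's
 * offset field requires.
 */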
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				unsigned int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
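/* Worked example (illustrative): with *mtu = 1500 and fragheaderlen = 40,
 * ((1500 - 40) & ~7) = 1456, so *maxfraglen = 1456 + 40 - 8 = 1488; adding
 * the 8 byte fragment header keeps each fragment within the 1500 byte MTU.
 */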
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork,
			  int hlimit, int tclass, struct ipv6_txoptions *opt,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = hlimit;
	v6_cork->tclass = tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, int dontfrag)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_ignore_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}
	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/* If this is the first and only packet and device
	 * supports checksum offloading, let's use it.
	 * Use transhdrlen, same as IPv4, because partial
	 * sums only work when transhdrlen is set.
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    length + fragheaderlen < mtu &&
	    rt->dst.dev->features & NETIF_F_V6_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;
	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;
		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen, int hlimit,
		    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
				     tclass, opt, rt, fl6);
		if (err)
			return err;

		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, dontfrag);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
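/*
 * Minimal usage sketch (illustrative; not part of the original file).
 * The corked datagram pattern used by callers such as udpv6_sendmsg():
 * data is staged with ip6_append_data() and then either transmitted as
 * one packet train with ip6_push_pending_frames() or discarded with
 * ip6_flush_pending_frames(). The getfrag callback, flow, route, hop
 * limit and traffic class are assumed to have been resolved by the
 * caller (real users pass e.g. ip_generic_getfrag() with a msghdr).
 */
static int __maybe_unused example_corked_send(struct sock *sk,
					      int getfrag(void *, char *, int,
							  int, int,
							  struct sk_buff *),
					      void *data, int len,
					      struct flowi6 *fl6,
					      struct rt6_info *rt,
					      int hlimit, int tclass)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, getfrag, data, len, 0 /* transhdrlen */,
			      hlimit, tclass, NULL /* opt */, fl6, rt,
			      0 /* flags */, 0 /* dontfrag */);
	if (err)
		ip6_flush_pending_frames(sk);
	else
		err = ip6_push_pending_frames(sk);
	release_sock(sk);
	return err;
}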
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel, fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
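/*
 * Note (illustrative; not part of the original file): __ip6_make_skb()
 * collapses the whole pending queue into a single skb whose remainder
 * hangs off frag_list, which is what lets ip6_fragment() later take its
 * fast path over pre-cut fragments instead of copying on the slow path.
 */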
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}
void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     int hlimit, int tclass,
			     struct ipv6_txoptions *opt, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     int dontfrag)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (opt ? opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (dontfrag < 0)
		dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, dontfrag);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}