/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}

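/* Per-cpu dst cache: each CPU caches its own route to the tunnel
 * endpoint so the transmit path of a connected tunnel can skip a full
 * route lookup.  Entries are swapped in with xchg(), so no lock is
 * taken; routes flagged DST_NOCACHE are never cached and NULL is
 * stored instead.
 */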
static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
			     struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	if (dst) {
		if (dst->flags & DST_NOCACHE)
			dst = NULL;
		else
			dst_clone(dst);
	}
	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
	dst_release(old_dst);
}

static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
{
	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
}

static void tunnel_dst_reset(struct ip_tunnel *t)
{
	tunnel_dst_set(t, NULL);
}

void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
{
	int i;

	for_each_possible_cpu(i)
		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
EXPORT_SYMBOL(ip_tunnel_dst_reset_all);

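/* Fetch this CPU's cached route, validating it first: an obsolete dst
 * whose ->check() fails is dropped from the cache and NULL is returned,
 * forcing the caller to do a fresh route lookup.
 */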
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
	if (dst) {
		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
			rcu_read_unlock();
			tunnel_dst_reset(t);
			return NULL;
		}
		dst_hold(dst);
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a keyed tunnel with the same key; if it is not
   present, it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for the input.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

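/* Pick the hash bucket for a tunnel: hashed by i_key and, for unicast
 * destinations, the remote address; multicast and wildcard destinations
 * hash with remote == 0.
 */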
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	h = ip_tunnel_hash(parms->i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
	hlist_del_init_rcu(&t->hash_node);
}

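/* Unlike ip_tunnel_lookup(), which finds the best receive match for an
 * incoming packet, this is an exact-match lookup on (saddr, daddr, key,
 * link, type) used on the configuration paths.
 */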
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;
	}
	return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

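/* Fill the flowi4 used for tunnel route lookups.  fl4_gre_key carries
 * the tunnel key so that flows over keyed tunnels can be told apart
 * from one another.
 */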
static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}

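/* Bind the tunnel to its underlay: route toward the peer (or use the
 * configured link) to pick an output device, then derive the headroom
 * we need and the MTU the tunnel device should advertise.
 */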
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			tunnel_dst_set(tunnel, &rt->dst);
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt, *fbt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	fbt = netdev_priv(itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}

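/* Common receive path for IPv4 tunnels: enforce the tunnel's checksum
 * and sequence number expectations, decapsulate ECN, update per-cpu
 * stats and hand the packet to GRO.
 */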
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

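	/* The (s32) cast makes the sequence number comparison safe across
	 * wraparound: only packets genuinely older than the expected
	 * sequence number are dropped.
	 */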
	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

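/* Enforce the route MTU and propagate it to the inner dst.  Oversized
 * IPv4 packets with DF set trigger an ICMP "fragmentation needed";
 * oversized IPv6 packets trigger a "packet too big".  A non-zero return
 * tells the caller to drop the packet.
 */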
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			    struct rtable *rt, __be16 df)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (df & htons(IP_DF)) && mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

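/* Common transmit path: resolve the destination (including NBMA tunnels
 * with no fixed daddr), reuse or create a cached route, apply inherited
 * TOS/TTL/DF, make sure there is enough headroom, then hand the packet
 * to iptunnel_xmit().
 */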
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	int err;
	bool connected = true;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */
		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			tunnel_dst_set(tunnel, &rt->dst);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
			    tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

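/* Apply new parameters to an existing tunnel.  The tunnel must be
 * unhashed and rehashed because saddr/daddr/key select its bucket, and
 * the cached routes are invalidated afterwards.
 */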
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	ip_tunnel_dst_reset_all(t);
	netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == itn->fb_tunnel_dev)
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags&TUNNEL_KEY))
			p->i_key = 0;
		if (!(p->o_flags&TUNNEL_KEY))
			p->o_key = 0;

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (!t && (cmd == SIOCADDTUNNEL))
			t = ip_tunnel_create(net, itn, p);

		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

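/* 0xFFF8 is the largest 8-byte-aligned value below the 64K IPv4 total
 * length limit; the tunnel and link-layer headers must still fit
 * underneath it.
 */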
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

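/* Queue every tunnel belonging to this itn for unregistration: devices
 * in the owning netns are found by matching rtnl_link_ops, devices
 * created from another netns by walking the hash table.
 */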
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

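/* Netlink (rtnl) counterparts of the ioctl-based configuration above. */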
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (ip_tunnel_find(itn, p, dev->type))
		return -EEXIST;

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int i, err;

	dev->destructor = ip_tunnel_dev_free;
	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *ipt_stats;
		ipt_stats = per_cpu_ptr(dev->tstats, i);
		u64_stats_init(&ipt_stats->syncp);
	}

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(netdev_priv(dev));

	ip_tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the least required initialization; the rest is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");