/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
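
/* The 0644 permission above also exposes the parameter at runtime, so
 * it can be toggled without reloading, e.g. (path assumes the module
 * is built as ip6_gre):
 *
 *	echo 0 > /sys/module/ip6_gre/parameters/log_ecn_error
 */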
#define HASH_SIZE_SHIFT  5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)

static int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
   We require an exact key match, i.e. if a key is present in the
   packet it will match only a tunnel with the same key; if it is not
   present, it will match only a keyless tunnel.

   All keyless packets, if not matched by a configured keyless tunnel,
   will match the fallback tunnel.
 */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))

static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, HASH_SIZE_SHIFT);
}
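
/* Bucket selection: the keyed (remote,local) and (remote,*) tables are
 * indexed by HASH_KEY(key) ^ HASH_ADDR(remote); the (*,local) and
 * wildcard tables by HASH_KEY(key) alone.  See ip6gre_tunnel_lookup()
 * and __ip6gre_bucket() below.
 */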
#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
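
/* The first index is the "prio" computed in __ip6gre_bucket(): bit 0
 * is set when a local address is configured and bit 1 when a unicast
 * remote address is configured, so tunnels[3] holds fully specified
 * (remote,local) tunnels and tunnels[0] holds wildcard ones.
 */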
/* Given src, dst and key, find the appropriate tunnel for the incoming
 * packet.
 */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
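
	/* An exact match (right link and device type) returns at once.
	 * Otherwise the lowest-scoring candidate wins: bit 0 penalises
	 * a link mismatch, bit 1 a device type mismatch; cand_score
	 * starts at 4, worse than any real score, so the first
	 * candidate found is always remembered.
	 */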
	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}
	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}
	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}
	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}
	if (cand)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}
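
/* Hash-table insertion below is RCU-safe: rcu_assign_pointer()
 * publishes t->next before making t itself reachable, so lockless
 * readers in ip6gre_tunnel_lookup() never see a half-linked entry.
 * Writers are serialised by RTNL, hence the rtnl_dereference().
 */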
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "ip6gre%d");

	dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);
	ip6gre_tnl_link_config(nt, 1);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
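
/* The "ip6gre%d" template used above is expanded by the core during
 * register_netdevice() to the first free index, e.g. "ip6gre1".
 */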
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink(ign, t);
	dev_put(dev);
}
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data + offset);
	int grehlen = offset + 4;
	struct ip6_tnl *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes were returned, a keyed message will be dropped here */
	if (!pskb_may_pull(skb, grehlen))
		return;
	ipv6h = (const struct ipv6hdr *)skb->data;
	p = (__be16 *)(skb->data + offset);
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 flags & GRE_KEY ?
				 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				 p[1]);
	if (!t)
		return;

	switch (type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
				     t->parms.name);
		break;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						     t->parms.name);
			}
		} else {
			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = be32_to_cpu(info) - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;
		break;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static int ip6gre_rcv(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip6_tnl *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto drop;

	ipv6h = ipv6_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			csum = skb_checksum_simple_validate(skb);
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, key,
				      gre_proto);
	if (tunnel) {
		struct pcpu_sw_netstats *tstats;

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
			tunnel->dev->stats.rx_dropped++;
			goto drop;
		}

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno &&
			     (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			ipv6h = ipv6_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

		skb_reset_network_header(skb);

		err = IP6_ECN_decapsulate(ipv6h, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
						     &ipv6h->saddr,
						     ipv6_get_dsfield(ipv6h));
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		netif_rx(skb);

		return 0;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
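
/* Laid out byte by byte, the 8-byte destination options header built
 * above is: [0] next header (filled in by ipv6_push_nfrag_opts()),
 * [1] extension header length (0, i.e. 8 bytes total), [2..4] the
 * RFC 2473 tunnel encapsulation limit TLV (type, length 1, value),
 * and [5..7] a PadN option (type, length 1, one zero byte) filling
 * the rest.
 */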
static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
			 struct net_device *dev,
			 __u8 dsfield,
			 struct flowi6 *fl6,
			 int encap_limit,
			 __u32 *pmtu)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct net_device *tdev;    /* Device to other host */
	struct ipv6hdr  *ipv6h;     /* Our new IP header */
	unsigned int max_headroom = 0; /* The extra header space needed */
	int    gre_hlen;
	struct ipv6_tel_txoption opt;
	int    mtu;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device_stats *stats = &tunnel->dev->stats;
	int err = -1;
	u8 proto;
	struct sk_buff *new_skb;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
		gre_hlen = 0;
		ipv6h = (struct ipv6hdr *)skb->data;
		fl6->daddr = ipv6h->daddr;
	} else {
		gre_hlen = tunnel->hlen;
		fl6->daddr = tunnel->parms.raddr;
	}

	if (!fl6->flowi6_mark)
		dst = ip6_tnl_dst_check(tunnel);

	if (!dst) {
		ndst = ip6_route_output(net, NULL, fl6);

		if (ndst->error)
			goto tx_err_link_failure;
		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_err_link_failure;
		}
		dst = ndst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     tunnel->parms.name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst) - sizeof(*ipv6h);
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));

	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (fl6->flowi6_mark) {
		skb_dst_set(skb, dst);
		ndst = NULL;
	} else {
		skb_dst_set_noref(skb, dst);
	}

	proto = NEXTHDR_GRE;
	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(*ipv6h));

	/*
	 *	Push down and install the IP header.
	 */
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
	ipv6h->hop_limit = tunnel->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;

	((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
				skb->len - sizeof(struct ipv6hdr));
		}
	}

	ip6tunnel_xmit(skb, dev);
	if (ndst)
		ip6_tnl_dst_store(tunnel, ndst);
	return 0;

tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(ndst);
	return err;
}
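
/* The three ip6gre_xmit_* wrappers below differ only in how they
 * derive dsfield, flow label, fwmark and the encapsulation limit from
 * the inner packet before handing the skb to ip6gre_xmit2().
 */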
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr  *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPIP;

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					  & IPV6_TCLASS_MASK;
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPV6;

	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= ip6_flowlabel(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = skb->protocol;

	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);

	return err;
}
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!ip6_tnl_xmit_ctl(t))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int addend = sizeof(struct ipv6hdr) + 4;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	/* Precalculate GRE options length */
	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (t->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (t->parms.o_flags&GRE_KEY)
			addend += 4;
		if (t->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	t->hlen = addend;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
					       addend;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - addend;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}
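
/* A worked example for the header-length math in
 * ip6gre_tnl_link_config() above: with GRE_CSUM, GRE_KEY and GRE_SEQ
 * all set, addend = sizeof(struct ipv6hdr) + 4 + 4 + 4 + 4 = 56, so a
 * 1500-byte underlying MTU yields a 1444-byte tunnel MTU, less another
 * 8 bytes when an encapsulation limit option is in use.
 */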
static int ip6gre_tnl_change(struct ip6_tnl *t,
	const struct __ip6_tnl_parm *p, int set_mtu)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	ip6_tnl_dst_reset(t);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
	const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = u->i_flags;
	p->o_flags = u->o_flags;
	memcpy(p->name, u->name, sizeof(u->name));
}
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
	const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = p->i_flags;
	u->o_flags = p->o_flags;
	memcpy(u->name, p->name, sizeof(u->name));
}
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
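
/* Bounds used in ip6gre_tunnel_change_mtu() above: 68 bytes is the
 * historical minimum link MTU, and 0xFFF8 (65528) is the largest
 * length that still fits the 16-bit IPv6 payload field when rounded
 * down to a multiple of 8, from which the link-layer header room is
 * subtracted.
 */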
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(ipv6h+1);

	ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6gre_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};
static void ip6gre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
	t = netdev_priv(dev);
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = sizeof(struct in6_addr);
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int i;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *ip6gre_tunnel_stats;
		ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
		u64_stats_init(&ip6gre_tunnel_stats->syncp);
	}

	return 0;
}
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen = sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler	= ip6gre_rcv,
	.err_handler	= ip6gre_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
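
/* INET6_PROTO_NOPOLICY skips the generic xfrm policy check on input
 * (ip6gre_rcv() performs xfrm6_policy_check() itself once a tunnel has
 * been matched), and INET6_PROTO_FINAL marks GRE as a terminal
 * protocol rather than an extension header.
 */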
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);
			while (t != NULL) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ip6gre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ip6gre_exit_net(struct net *net)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit = ip6gre_exit_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data);
}
static void ip6gre_netlink_parms(struct nlattr *data[],
				struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));

	if (data[IFLA_GRE_REMOTE])
		nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
}
static int ip6gre_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	ip6gre_tnl_link_config(tunnel, 1);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init		= ip6gre_tap_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6gre_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};
static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->iflink = 0;
	dev->features |= NETIF_F_NETNS_LOCAL;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
	struct nlattr *tb[], struct nlattr *data[])
{
	struct ip6_tnl *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &nt->parms);

	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);
	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);

out:
	return err;
}
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	ip6gre_netlink_parms(data, &p);

	t = ip6gre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;
	}

	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link(ign, t);
	netdev_state_change(dev);

	return 0;
}
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
	    nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
};
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
};
/*
 *	And now the module's code and kernel interface.
 */
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_NETDEV("ip6gre0");
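
/*
 * Typical configuration from userspace via iproute2, for illustration
 * only (the interface name and addresses below are examples, not
 * defaults):
 *
 *	ip link add name grete1 type ip6gre \
 *		local 2001:db8::1 remote 2001:db8::2 key 42
 *	ip link set grete1 up
 *
 * "type ip6gretap" instead creates the Ethernet-over-GRE flavour
 * registered via ip6gre_tap_ops above.
 */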