/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#include <net/dst_metadata.h>
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
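
/* Learned entries are aged out by vxlan_cleanup(): the timer fires every
 * FDB_AGE_INTERVAL and evicts non-permanent entries that have been idle
 * longer than the configured ageing time (cfg.age_interval, which defaults
 * to FDB_AGE_DEFAULT).
 */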
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
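/* Example: "modprobe vxlan udp_port=4789" makes new devices default to the
 * IANA-assigned port. The parameter is read-only (0444) after module load.
 */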
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
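
/* Metadata (flow-based) mode is in effect when either this socket was
 * created with VXLAN_F_COLLECT_METADATA or a subsystem has requested
 * metadata collection globally via the ip_tunnel_collect_metadata()
 * static key.
 */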
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev *vxlan;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
		if (vxlan->default_dst.remote_vni == vni)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
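
/* Send an RTM_GETNEIGH miss notification so a userspace control plane
 * (e.g. a daemon listening on RTNLGRP_NEIGH) can install the missing
 * destination.
 */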
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}
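
/* Like __vxlan_find_mac(), but also refreshes f->used so an entry that is
 * actively in use is not aged out.
 */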
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
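
/* GRO path counterpart of vxlan_remcsum(): validate and apply the remote
 * checksum offload data carried in the VXLAN header before aggregation.
 */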
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb,
					  struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
					     udp_offloads);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh   = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
			      struct udp_offload *uoff)
{
	udp_tunnel_gro_complete(skb, nhoff);

	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(net, &vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}
/* Notify netdevs that UDP port is no longer listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
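
/* The two entry points below back the bridge fdb netlink commands, e.g.
 * "bridge fdb add <mac> dev vxlan0 dst <ip> [port <port>] [vni <vni>]".
 */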
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (idx < cb->args[0])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				++idx;
			}
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && dev->vn4_sock &&
	    atomic_read(&dev->vn4_sock->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6 && dev->vn6_sock &&
	    atomic_read(&dev->vn6_sock->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
static void __vxlan_sock_release(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return;
	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	__vxlan_sock_release(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	__vxlan_sock_release(vxlan->vn6_sock);
#endif
}
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = vxlan->vn4_sock->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		sk = vxlan->vn6_sock->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = vxlan->vn4_sock->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		sk = vxlan->vn6_sock->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
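
/* Non-GRO receive path: fold the remote checksum offload data back into the
 * inner packet and clear the RCO bits so only genuinely unknown flags remain
 * in "unparsed".
 */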
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset, plen;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	plen = sizeof(struct vxlanhdr) + offset + sizeof(u16);

	if (!pskb_may_pull(skb, plen))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb)
{
	union vxlan_addr saddr;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		return false;

	return true;
}
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}
	return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	void *oiph;

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		return 1;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		return 1;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
	if (!vxlan)
		goto drop;

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
				 !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 vxlan_vni_to_tun_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!vxlan_set_mac(vxlan, vs, skb))
		goto drop;

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
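
/* Proxy ARP (VXLAN_F_PROXY): answer ARP requests from the local neighbour
 * cache instead of flooding them across the overlay.
 */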
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
	struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
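
/* Stamp the Group Based Policy extension bits onto an already-built VXLAN
 * header; a no-op when the skb carries no GBP metadata.
 */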
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	skb = iptunnel_handle_offloads(skb, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;
}
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (!IS_ERR(rt)) {
		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	}
	return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_tos = RT_TOS(tos);
	fl6.daddr = *daddr;
	fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
	fl6.flowlabel = label;
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 vxlan->vn6_sock->sock->sk,
					 &ndst, &fl6);
	if (err < 0)
		return ERR_PTR(err);

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family =  AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family =  AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}
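
/* Encapsulate and send one skb to a single remote. In classic mode rdst
 * supplies the destination; in collect-metadata mode rdst is NULL and the
 * tunnel info attached to the skb is used instead.
 */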
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk;
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	union vxlan_addr *dst;
	union vxlan_addr remote_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	__be32 vni, label;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = rdst->remote_vni;
		dst = &rdst->remote_ip;
		dst_cache = &rdst->dst_cache;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = vxlan_tun_id_to_vni(info->key.tun_id);
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET)
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
		else
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
		dst = &remote_ip;
		dst_cache = &info->dst_cache;
	}

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->cfg.ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->cfg.tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	label = vxlan->cfg.label;
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	if (info) {
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);

		if (info->options_len)
			md = ip_tunnel_info_opts(info);
	} else {
		md->gbp = skb->mark;
	}

	if (dst->sa.sa_family == AF_INET) {
		__be32 saddr;

		if (!vxlan->vn4_sock)
			goto drop;
		sk = vxlan->vn4_sock->sock->sk;

		rt = vxlan_get_route(vxlan, skb,
				     rdst ? rdst->remote_ifindex : 0, tos,
				     dst->sin.sin_addr.s_addr, &saddr,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
			df = htons(IP_DF);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto xmit_tx_error;

		udp_tunnel_xmit_skb(rt, sk, skb, saddr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct dst_entry *ndst;
		struct in6_addr saddr;
		u32 rt6i_flags;

		if (!vxlan->vn6_sock)
			goto drop;
		sk = vxlan->vn6_sock->sock->sk;

		ndst = vxlan6_get_route(vxlan, skb,
					rdst ? rdst->remote_ifindex : 0, tos,
					label, &dst->sin6.sin6_addr, &saddr,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (rt6i_flags & RTF_LOCAL &&
		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0) {
			dst_release(ndst);
			return;
		}
		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
				     &saddr, &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

xmit_tx_error:
	/* skb is already freed. */
	skb = NULL;
rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *           source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY)) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_TX)
			vxlan_xmit_one(skb, dev, NULL, false);
		else
			kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
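
/* Hash the device into the socket's per-VNI table so vxlan_vs_find_vni()
 * can demultiplex incoming packets to it.
 */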
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vxlan);

	return ret;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static int __vxlan_change_mtu(struct net_device *dev,
			      struct net_device *lowerdev,
			      struct vxlan_rdst *dst, int new_mtu, bool strict)
{
	int max_mtu = IP_MAX_MTU;

	if (lowerdev)
		max_mtu = lowerdev->mtu;

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu -= VXLAN6_HEADROOM;
	else
		max_mtu -= VXLAN_HEADROOM;

	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);
	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}
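
/* ndo_fill_metadata_dst: pre-resolve the route for a collect-metadata skb so
 * the caller (e.g. OVS) learns the tunnel source address and UDP ports that
 * transmission would use.
 */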
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;

		if (!vxlan->vn4_sock)
			return -EINVAL;
		rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src, NULL, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct dst_entry *ndst;

		if (!vxlan->vn6_sock)
			return -EINVAL;
		ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src, NULL, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vxlan_get_sk_family(vs);
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_RXCSUM;
	dev->features   |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};
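/* For reference, a typical netlink user such as iproute2 exercises these
 * attributes with e.g.:
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 *
 * which maps to IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT respectively.
 */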
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		/* VNIs are 24 bit; VXLAN_VID_MASK (0x00ffffff) itself is a
		 * valid ID, so compare against VXLAN_N_VID to avoid an
		 * off-by-one rejection of the maximum VNI.
		 */
		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
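/* Examples of configurations rejected above: "ip link add vxlan0 type vxlan
 * id 16777216" requests a VNI outside the 24-bit space and fails with
 * -ERANGE, and a port range whose high end is below its low end fails with
 * -EINVAL.
 */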
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	udp_tunnel_sock_release(vs->sock);
	kfree_rcu(vs, rcu);
}
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		/* Bind v6-only so a v4 socket can coexist on the same port. */
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. Any non-zero encap_type
	 * makes the UDP stack hand received datagrams to encap_rcv instead
	 * of the normal socket receive path.
	 */
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			/* A socket whose refcount already dropped to zero is
			 * being torn down; it must not be reused.
			 */
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		vxlan->vn6_sock = vs;
	else
#endif
		vxlan->vn4_sock = vs;
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	int ret = 0;

	vxlan->vn4_sock = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	vxlan->vn6_sock = NULL;
	if (ipv6 || metadata)
		ret = __vxlan_sock_add(vxlan, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
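/* Socket combinations resulting from the logic above:
 *   - IPv4-only device:          vn4_sock only
 *   - IPv6 device:               vn6_sock only
 *   - collect-metadata device:   both sockets, so a single external-mode
 *     device can terminate IPv4 and IPv6 underlays simultaneously
 */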
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label only supported in use with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	}

	if (conf->mtu) {
		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
		if (err)
			return err;
	}

	/* Reserve headroom for the worst-case encapsulation; a metadata
	 * based device may have to emit either address family.
	 */
	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port)
		vxlan->cfg.dst_port = default_port;
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	/* Refuse a device that would be indistinguishable on receive from
	 * an existing one: same VNI, address family, destination port and
	 * receive-path flags.
	 */
	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS))
			return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type, struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
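/* Illustrative sketch (not part of this driver): an in-kernel user such as
 * a flow-based tunnel backend might create a metadata-mode device roughly
 * like this; the function below is a hypothetical example, not an API.
 */
#if 0
static struct net_device *example_create_flow_based_vxlan(struct net *net)
{
	struct vxlan_config conf = {
		.dst_port = htons(4789),	/* IANA-assigned VXLAN port */
		.flags	  = VXLAN_F_COLLECT_METADATA,
	};

	/* On success the device comes back already registered; IS_ERR()
	 * covers every failure path in vxlan_dev_configure().
	 */
	return vxlan_dev_create(net, "vxlan_ex%d", NET_NAME_ENUM, &conf);
}
#endif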
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;
	int err;

	memset(&conf, 0, sizeof(conf));

	if (data[IFLA_VXLAN_ID])
		conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			     IPV6_FLOWLABEL_MASK;

	/* Learning is on unless explicitly turned off. */
	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	/* Note the inverted sense: the attribute enables checksums, the
	 * flag disables them.
	 */
	if (data[IFLA_VXLAN_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	err = vxlan_dev_configure(src_net, dev, &conf);
	switch (err) {
	case -ENODEV:
		pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
		break;

	case -EPERM:
		pr_info("IPv6 is disabled via sysctl\n");
		break;

	case -EEXIST:
		pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
		break;
	}

	return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
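/* The attributes emitted above are what "ip -d link show" decodes for a
 * vxlan device; vxlan_get_size() must be kept in sync with this function
 * so the reserved netlink message space stays accurate.
 */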
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* A vxlan device bound to a lower device must be removed
		 * when that lower device is unregistered (e.g. on module
		 * unload). In all other cases remote_ifindex is 0, so
		 * nothing here matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
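/* Registered with late_initcall() rather than module_init() so that, when
 * vxlan is built into the kernel, initialization runs only after the core
 * networking initcalls it depends on have completed.
 */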
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");
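/* MODULE_ALIAS_RTNL_LINK("vxlan") expands to MODULE_ALIAS("rtnl-link-vxlan"),
 * which is what lets "ip link add ... type vxlan" demand-load this module.
 */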