/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"
#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT	300		/* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
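/* For example, the IANA-assigned port can be chosen at module load time
 * with "modprobe vxlan udp_port=4789". The 0444 mode makes the parameter
 * read-only once loaded; it only seeds the default destination port of
 * newly created devices (see vxlan_setup()).
 */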
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	__be32		  vni;
	u8		  flags;	/* see ndm_flags */
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}
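/* Flow-based mode: if the receiving socket has VXLAN_F_COLLECT_METADATA,
 * or any collector (e.g. an OVS datapath or an "external" device) is
 * active system-wide, vxlan_rcv() attaches the outer tunnel parameters to
 * the skb as a metadata dst instead of relying on per-device config.
 */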
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}
static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}
static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev *vxlan;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
		if (vxlan->default_dst.remote_vni == vni)
			return vxlan;
	}

	return NULL;
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
		ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
	    nla_put_u32(skb, NDA_SRC_VNI,
			be32_to_cpu(fdb->vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN)		 /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16))	 /* NDA_PORT */
		+ nla_total_size(sizeof(__be32))	 /* NDA_VNI */
		+ nla_total_size(sizeof(__u32))		 /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32))		 /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
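/* The two miss handlers above only emit RTM_GETNEIGH notifications on the
 * RTNLGRP_NEIGH group; no kernel state is created. With the l2miss/l3miss
 * device flags set, a userspace controller is expected to listen for these
 * events and install the missing fdb/neighbour entries itself.
 */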
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
	value >>= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
	/* use 1 byte of OUI and 3 bytes of NIC */
	u32 key = get_unaligned((u32 *)(addr + 2));

	return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac, __be32 vni)
{
	if (vxlan->flags & VXLAN_F_COLLECT_METADATA)
		return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
	else
		return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac, __be32 vni)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr)) {
			if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
				if (vni == f->vni)
					return f;
			} else {
				return f;
			}
		}
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac, __be32 vni)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac, vni);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
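/* Remote checksum offload (RCO) borrows low-order bits of the reserved
 * VNI word to encode where the inner checksum starts and where the
 * checksum field sits; vxlan_rco_start()/vxlan_rco_offset() above recover
 * those fields so the receiver can finish a checksum the sender left
 * partial.
 */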
static struct sk_buff **vxlan_gro_receive(struct sock *sk,
					  struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh   = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 src_vni, __be32 vni,
			    __u32 ifindex, __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;
	int rc;

	f = __vxlan_find_mac(vxlan, mac, src_vni);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		f->vni = src_vni;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
		if (rc < 0) {
			kfree(f);
			return rc;
		}

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac, src_vni));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
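/* Entries reach this function through the ndo_fdb_add hook; a
 * representative iproute2 invocation is:
 *
 *	bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 192.0.2.10
 *
 * The all-zeros MAC is the default destination entry, and NLM_F_APPEND
 * adds further remotes to it for vxlan_xmit() to flood to.
 */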
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static void vxlan_dst_free(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	dst_cache_destroy(&rd->dst_cache);
	kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
				  struct vxlan_rdst *rd)
{
	list_del_rcu(&rd->list);
	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
	call_rcu(&rd->rcu, vxlan_dst_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
			   __be32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_SRC_VNI]) {
		if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
			return -EINVAL;
		*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
	} else {
		*src_vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 src_vni, vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, src_vni, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
			      const unsigned char *addr, union vxlan_addr ip,
			      __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
			      u16 vid)
{
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	int err = -ENOENT;

	f = vxlan_find_mac(vxlan, addr, src_vni);
	if (!f)
		return err;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		vxlan_fdb_dst_destroy(vxlan, f, rd);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	return 0;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	union vxlan_addr ip;
	__be32 src_vni, vni;
	__be16 port;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
				 vid);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac,
			__be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac, vni);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vni,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
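/* This is the VXLAN analogue of an Ethernet bridge learning source MACs,
 * except what is learned is the remote tunnel endpoint (outer source IP)
 * for each inner source MAC. It runs only when VXLAN_F_LEARN is set
 * (disabled with iproute2's "nolearning" option).
 */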
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6;
#endif
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!atomic_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif

	rcu_assign_pointer(vxlan->vn4_sock, NULL);
	synchronize_net();

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		*protocol = htons(ETH_P_IP);
		break;
	case VXLAN_GPE_NP_IPV6:
		*protocol = htons(ETH_P_IPV6);
		break;
	case VXLAN_GPE_NP_ETHERNET:
		*protocol = htons(ETH_P_TEB);
		break;
	default:
		return false;
	}

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb, __be32 vni)
{
	union vxlan_addr saddr;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni))
		return false;

	return true;
}
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}

	return err <= 1;
}
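/* On-the-wire VXLAN header (RFC 7348), for reference in the receive path
 * below:
 *
 *	|R|R|R|R|I|R|R|R|            Reserved (24 bits)                 |
 *	|          VXLAN Network Identifier (VNI)       | Reserved (8)  |
 *
 * Only the I flag (VXLAN_HF_VNI) is mandatory. The GBP, GPE and RCO
 * extensions borrow reserved bits, which is why vxlan_rcv() drops packets
 * that still have unclaimed bits set after parsing.
 */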
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;
	__be32 vni = 0;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));
		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * the VXLAN RFC (RFC7348), which stipulates that bits in
		 * reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb, vni))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
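/* With the "proxy" device flag set, vxlan_xmit() hands ARP requests to
 * arp_reduce() above so they are answered from the local forwarding table
 * instead of being flooded to remote endpoints; neigh_reduce() below is
 * the IPv6 neighbour-discovery counterpart.
 */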
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL || !pskb_may_pull(request, request->len))
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)(ipv6_hdr(request) + 1);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_network_offset(request) -
		sizeof(struct ipv6hdr) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		goto out;

	iphdr = ipv6_hdr(skb);
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)(iphdr + 1);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;

	switch (protocol) {
	case htons(ETH_P_IP):
		gpe->next_protocol = VXLAN_GPE_NP_IPV4;
		return 0;
	case htons(ETH_P_IPV6):
		gpe->next_protocol = VXLAN_GPE_NP_IPV6;
		return 0;
	case htons(ETH_P_TEB):
		gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
		return 0;
	}
	return -EPFNOSUPPORT;
}
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len;

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			return err;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;
}
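/* After vxlan_build_skb() the packet carries, outermost first:
 *
 *	[outer IP][outer UDP][VXLAN header][inner Ethernet frame]
 *
 * where the outer IP and UDP headers are appended later by
 * udp_tunnel_xmit_skb()/udp_tunnel6_xmit_skb() in vxlan_xmit_one().
 */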
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
				      struct vxlan_sock *sock4,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (!sock4)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (likely(!IS_ERR(rt))) {
		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n", &daddr);
			ip_rt_put(rt);
			return ERR_PTR(-ELOOP);
		}

		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	} else {
		netdev_dbg(dev, "no route to %pI4\n", &daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct net_device *dev,
					  struct vxlan_sock *sock6,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (unlikely(err < 0)) {
		netdev_dbg(dev, "no route to %pI6\n", daddr);
		return ERR_PTR(-ENETUNREACH);
	}

	if (unlikely(ndst->dev == dev)) {
		netdev_dbg(dev, "circular route to %pI6\n", daddr);
		dst_release(ndst);
		return ERR_PTR(-ELOOP);
	}

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan, __be32 vni)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
				 struct vxlan_dev *vxlan, union vxlan_addr *daddr,
				 __be16 dst_port, __be32 vni, struct dst_entry *dst,
				 u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
	 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
	/* Bypass encapsulation if the destination is local */
	if (rt_flags & RTCF_LOCAL &&
	    !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_release(dst);
		dst_vxlan = vxlan_find_vni(vxlan->net, vni,
					   daddr->sa.sa_family, dst_port,
					   vxlan->flags);
		if (!dst_vxlan) {
			dev->stats.tx_errors++;
			kfree_skb(skb);

			return -ENOENT;
		}
		vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
		return 1;
	}

	return 0;
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   __be32 default_vni, struct vxlan_rdst *rdst,
			   bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct iphdr *old_iph = ip_hdr(skb);
	union vxlan_addr *dst;
	union vxlan_addr remote_ip, local_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	struct dst_entry *ndst = NULL;
	__be32 vni, label;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst = &rdst->remote_ip;
		if (vxlan_addr_any(dst)) {
			if (did_rsc) {
				/* short-circuited back to local bridge */
				vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
				return;
			}
			goto drop;
		}

		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = (rdst->remote_vni) ? : default_vni;
		local_ip = vxlan->cfg.saddr;
		dst_cache = &rdst->dst_cache;
		md->gbp = skb->mark;
		ttl = vxlan->cfg.ttl;
		if (!ttl && vxlan_addr_multicast(dst))
			ttl = 1;

		tos = vxlan->cfg.tos;
		if (tos == 1)
			tos = ip_tunnel_get_dsfield(old_iph, skb);

		if (dst->sa.sa_family == AF_INET)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
		label = vxlan->cfg.label;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET) {
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
		}
		dst = &remote_ip;
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = tunnel_id_to_key32(info->key.tun_id);
		dst_cache = &info->dst_cache;
		if (info->options_len)
			md = ip_tunnel_info_opts(info);
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	}
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	rcu_read_lock();
	if (dst->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;
		__be16 df = 0;

		rt = vxlan_get_route(vxlan, dev, sock4, skb,
				     rdst ? rdst->remote_ifindex : 0, tos,
				     dst->sin.sin_addr.s_addr,
				     &local_ip.sin.sin_addr.s_addr,
				     dst_port, src_port,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (!info) {
			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, vni, &rt->dst,
						    rt->rt_flags);
			if (err)
				goto out_unlock;
		} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
			df = htons(IP_DF);
		}

		ndst = &rt->dst;
		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
					rdst ? rdst->remote_ifindex : 0, tos,
					label, &dst->sin6.sin6_addr,
					&local_ip.sin6.sin6_addr,
					dst_port, src_port,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_error;
		}

		if (!info) {
			u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;

			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, vni, ndst,
						    rt6i_flags);
			if (err)
				goto out_unlock;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
				     &local_ip.sin6.sin6_addr,
				     &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}
out_unlock:
	rcu_read_unlock();
	return;

drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return;

tx_error:
	rcu_read_unlock();
	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;
	dst_release(ndst);
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Outer UDP source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;
	__be32 vni = 0;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
		    info->mode & IP_TUNNEL_INFO_TX) {
			vni = tunnel_id_to_key32(info->key.tun_id);
		} else {
			if (info && info->mode & IP_TUNNEL_INFO_TX)
				vxlan_xmit_one(skb, dev, vni, NULL, false);
			else
				kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vxlan->flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
			struct ipv6hdr *hdr, _hdr;
			if ((hdr = skb_header_pointer(skb,
						      skb_network_offset(skb),
						      sizeof(_hdr), &_hdr)) &&
			    hdr->nexthdr == IPPROTO_ICMPV6)
				return neigh_reduce(dev, skb, vni);
		}
#endif
	}

	eth = eth_hdr(skb);
	f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
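/* Flooding note: for an entry with multiple remotes (typically the
 * all-zeros default entry) each remote after the first receives a clone
 * of the skb; the original skb goes to the first remote last, so no copy
 * is made in the common single-remote case.
 */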
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & (NUD_PERMANENT | NUD_NOARP))
				continue;

			if (f->flags & NTF_EXT_LEARNED)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}
static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
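/* The multicast group is joined only while the device is up, e.g. after
 * "ip link set vxlan0 up"; vxlan_stop() below leaves the group again once
 * no other running VXLAN device in the namespace still uses it.
 */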
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);

			if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
				continue;
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan, false);
	vxlan_sock_release(vxlan);

	return ret;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);
	bool use_ipv6 = false;

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		use_ipv6 = true;

	/* This check is different than dev->max_mtu, because it looks at
	 * the lowerdev->mtu, rather than the static dev->max_mtu
	 */
	if (lowerdev) {
		int max_mtu = lowerdev->mtu -
			      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
		if (new_mtu > max_mtu)
			return -EINVAL;
	}

	dev->mtu = new_mtu;
	return 0;
}
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;

		rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src, dport, sport,
				     &info->dst_cache, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
		struct dst_entry *ndst;

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src, dport, sport,
					&info->dst_cache, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}
static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
			udp_tunnel_push_rx_port(dev, vs->sock,
						(vs->flags & VXLAN_F_GPE) ?
						UDP_TUNNEL_TYPE_VXLAN_GPE :
						UDP_TUNNEL_TYPE_VXLAN);
	}
	spin_unlock(&vn->sock_lock);
}

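/* Illustrative sketch (not part of this driver) of the callback that a
 * NIC driver with receive-side VXLAN offload might provide; the helper
 * nic_program_rx_port() is hypothetical:
 *
 *	static void nic_udp_tunnel_add(struct net_device *dev,
 *				       struct udp_tunnel_info *ti)
 *	{
 *		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
 *			return;
 *		nic_program_rx_port(dev, ti->sa_family, ti->port);
 *	}
 */
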
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
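
	/* A deferrable timer is enough here: FDB ageing does not need to
	 * wake an idle CPU, and a scan that fires a little late is
	 * harmless.
	 */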
	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static void vxlan_ether_setup(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}

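/* Raw mode is used for VXLAN-GPE: the device is a point-to-point
 * interface with no link-layer header of its own, since GPE can carry
 * payloads other than Ethernet.
 */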
static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};

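/* These attributes are what iproute2 encodes; for instance (illustrative
 * only):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 *
 * maps to IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT.
 */
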
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}

static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
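		/* A socket whose refcount already dropped to zero is being
		 * torn down; refuse to share it rather than resurrect it.
		 */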
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(vxlan->vn6_sock, vs);
	else
#endif
		rcu_assign_pointer(vxlan->vn4_sock, vs);
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}

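/* Open the socket(s) this device needs: metadata mode listens on both
 * address families, otherwise only on the configured one.
 */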
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
	bool ipv4 = !ipv6 || metadata;
	int ret = 0;

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
	if (ipv6) {
		ret = __vxlan_sock_add(vxlan, true);
		if (ret < 0 && ret != -EAFNOSUPPORT)
			ipv4 = false;
	}
#endif
	if (ipv4)
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}

static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf,
			       bool changelink)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	if (!changelink) {
		if (conf->flags & VXLAN_F_GPE) {
			/* For now, allow GPE only together with
			 * COLLECT_METADATA. This can be relaxed later; in such
			 * case, the other side of the PtP link will have to be
			 * provided.
			 */
			if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
			    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
				pr_info("unsupported combination of extensions\n");
				return -EINVAL;
			}
			vxlan_raw_setup(dev);
		} else {
			vxlan_ether_setup(dev);
		}

		/* MTU range: 68 - 65535 */
		dev->min_mtu = ETH_MIN_MTU;
		dev->max_mtu = ETH_MAX_MTU;
		vxlan->net = src_net;
	}

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label only supported in use with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex &&
	    conf->remote_ifindex != vxlan->cfg.remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n",
				dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu -
				   (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	} else if (!conf->remote_ifindex &&
		   vxlan_addr_multicast(&dst->remote_ip)) {
		pr_info("multicast destination requires interface to be specified\n");
		return -EINVAL;
	}

	if (lowerdev) {
		dev->gso_max_size = lowerdev->gso_max_size;
		dev->gso_max_segs = lowerdev->gso_max_segs;
	}

	if (conf->mtu) {
		int max_mtu = ETH_MAX_MTU;

		if (lowerdev)
			max_mtu = lowerdev->mtu;

		max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
			return -EINVAL;

		dev->mtu = conf->mtu;

		if (conf->mtu > max_mtu)
			dev->mtu = max_mtu;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
		else
			vxlan->cfg.dst_port = default_port;
	}
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	if (changelink)
		return 0;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
			pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
			return -EEXIST;
		}
	}

	return 0;
}

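/* Shared device-creation path: used both by rtnl_link (vxlan_newlink)
 * and by in-kernel callers via vxlan_dev_create().
 */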
static int __vxlan_dev_create(struct net *net, struct net_device *dev,
			      struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	err = vxlan_dev_configure(net, dev, conf, false);
	if (err)
		return err;

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE | NUD_PERMANENT,
				       NLM_F_EXCL | NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}

static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
			 struct net_device *dev, struct vxlan_config *conf,
			 bool changelink)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	memset(conf, 0, sizeof(*conf));

	/* if changelink operation, start with old existing cfg */
	if (changelink)
		memcpy(conf, &vxlan->cfg, sizeof(*conf));

	if (data[IFLA_VXLAN_ID]) {
		__be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

		if (changelink && (vni != conf->vni))
			return -EOPNOTSUPP;
		conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
	}

	if (data[IFLA_VXLAN_GROUP]) {
		conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf->remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf->saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			      IPV6_FLOWLABEL_MASK;

	if (data[IFLA_VXLAN_LEARNING]) {
		if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) {
			conf->flags |= VXLAN_F_LEARN;
		} else {
			conf->flags &= ~VXLAN_F_LEARN;
			vxlan->flags &= ~VXLAN_F_LEARN;
		}
	} else if (!changelink) {
		/* default to learn on a new device */
		conf->flags |= VXLAN_F_LEARN;
	}

	if (data[IFLA_VXLAN_AGEING]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	}

	if (data[IFLA_VXLAN_PROXY]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
			conf->flags |= VXLAN_F_PROXY;
	}

	if (data[IFLA_VXLAN_RSC]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_RSC]))
			conf->flags |= VXLAN_F_RSC;
	}

	if (data[IFLA_VXLAN_L2MISS]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
			conf->flags |= VXLAN_F_L2MISS;
	}

	if (data[IFLA_VXLAN_L3MISS]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
			conf->flags |= VXLAN_F_L3MISS;
	}

	if (data[IFLA_VXLAN_LIMIT]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
	}

	if (data[IFLA_VXLAN_COLLECT_METADATA]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
			conf->flags |= VXLAN_F_COLLECT_METADATA;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		if (!changelink) {
			const struct ifla_vxlan_port_range *p
				= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
			conf->port_min = ntohs(p->low);
			conf->port_max = ntohs(p->high);
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (data[IFLA_VXLAN_PORT]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
	}

	if (data[IFLA_VXLAN_UDP_CSUM]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
	}

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
	}

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
			conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
	}

	if (data[IFLA_VXLAN_REMCSUM_TX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
			conf->flags |= VXLAN_F_REMCSUM_TX;
	}

	if (data[IFLA_VXLAN_REMCSUM_RX]) {
		if (changelink)
			return -EOPNOTSUPP;
		if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
			conf->flags |= VXLAN_F_REMCSUM_RX;
	}

	if (data[IFLA_VXLAN_GBP]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_GBP;
	}

	if (data[IFLA_VXLAN_GPE]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_GPE;
	}

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
	}

	if (tb[IFLA_MTU]) {
		if (changelink)
			return -EOPNOTSUPP;
		conf->mtu = nla_get_u32(tb[IFLA_MTU]);
	}

	return 0;
}

static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;
	int err;

	err = vxlan_nl2conf(tb, data, dev, &conf, false);
	if (err)
		return err;

	return __vxlan_dev_create(src_net, dev, &conf);
}

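/* Only a small subset of attributes may be modified on an existing
 * device; vxlan_nl2conf() rejects the rest with -EOPNOTSUPP.
 */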
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct vxlan_rdst old_dst;
	struct vxlan_config conf;
	int err;

	err = vxlan_nl2conf(tb, data,
			    dev, &conf, true);
	if (err)
		return err;

	memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));

	err = vxlan_dev_configure(vxlan->net, dev, &conf, true);
	if (err)
		return err;

	/* handle default dst entry */
	if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
		spin_lock_bh(&vxlan->hash_lock);
		if (!vxlan_addr_any(&old_dst.remote_ip))
			__vxlan_fdb_delete(vxlan, all_zeros_mac,
					   old_dst.remote_ip,
					   vxlan->cfg.dst_port,
					   old_dst.remote_vni,
					   old_dst.remote_vni,
					   old_dst.remote_ifindex, 0);

		if (!vxlan_addr_any(&dst->remote_ip)) {
			err = vxlan_fdb_create(vxlan, all_zeros_mac,
					       &dst->remote_ip,
					       NUD_REACHABLE | NUD_PERMANENT,
					       NLM_F_CREATE | NLM_F_APPEND,
					       vxlan->cfg.dst_port,
					       dst->remote_vni,
					       dst->remote_vni,
					       dst->remote_ifindex,
					       NTF_SELF);
			if (err) {
				spin_unlock_bh(&vxlan->hash_lock);
				return err;
			}
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	return 0;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	vxlan_flush(vxlan, true);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_RX */
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.changelink	= vxlan_changelink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = __vxlan_dev_create(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);

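/* Illustrative sketch of an in-kernel caller (e.g. a flow-based tunnel
 * user such as openvswitch); the names and values are examples, not a
 * fixed contract:
 *
 *	struct vxlan_config conf = {
 *		.no_share = true,
 *		.flags = VXLAN_F_COLLECT_METADATA,
 *		.dst_port = htons(4789),
 *	};
 *	struct net_device *dev;
 *
 *	dev = vxlan_dev_create(net, "vxlan_sys", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return ERR_CAST(dev);
 */
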
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");