#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
+#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
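+/* True when this socket gathers per-packet tunnel metadata (outer
+ * addresses, VNI, GBP) into a dst_metadata entry on receive, instead
+ * of relying only on per-device configuration.
+ */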
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+ return vs->flags & VXLAN_F_COLLECT_METADATA;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
+ struct metadata_dst *tun_dst = NULL;
+ struct ip_tunnel_info *info;
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
u32 flags, vni;
- struct vxlan_metadata md = {0};
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
/* Need VXLAN and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
vni &= VXLAN_VNI_MASK;
}
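+ /* On receive, snapshot the outer IPv4 and UDP headers into an
+ * ip_tunnel_info hung off a metadata dst, so consumers further up
+ * the stack can read the tunnel key from the skb.
+ */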
+ if (vxlan_collect_metadata(vs)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
+ if (!tun_dst)
+ goto drop;
+
+ info = &tun_dst->u.tun_info;
+ info->key.ipv4_src = iph->saddr;
+ info->key.ipv4_dst = iph->daddr;
+ info->key.ipv4_tos = iph->tos;
+ info->key.ipv4_ttl = iph->ttl;
+ info->key.tp_src = udp_hdr(skb)->source;
+ info->key.tp_dst = udp_hdr(skb)->dest;
+
+ info->mode = IP_TUNNEL_INFO_RX;
+ info->key.tun_flags = TUNNEL_KEY;
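+ /* vni still holds the on-wire field, i.e. the VNI in its upper
+ * 24 bits; shift it down to get the identifier for tun_id.
+ */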
+ info->key.tun_id = cpu_to_be64(vni >> 8);
+ if (udp_hdr(skb)->check != 0)
+ info->key.tun_flags |= TUNNEL_CSUM;
+
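+ /* struct vxlan_metadata lives in the tun_info options area; point
+ * md at it so the GBP fields filled in below land in the metadata
+ * dst as well.
+ */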
+ md = ip_tunnel_info_opts(info, sizeof(*md));
+ md->tun_dst = tun_dst;
+ } else {
+ memset(md, 0, sizeof(*md));
+ }
+
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
struct vxlanhdr_gbp *gbp;
gbp = (struct vxlanhdr_gbp *)vxh;
- md.gbp = ntohs(gbp->policy_id);
+ md->gbp = ntohs(gbp->policy_id);
+
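+ /* Mark the tun_info options as carrying VXLAN-GBP data */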
+ if (tun_dst)
+ info->key.tun_flags |= TUNNEL_VXLAN_OPT;
if (gbp->dont_learn)
- md.gbp |= VXLAN_GBP_DONT_LEARN;
+ md->gbp |= VXLAN_GBP_DONT_LEARN;
if (gbp->policy_applied)
- md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+ md->gbp |= VXLAN_GBP_POLICY_APPLIED;
flags &= ~VXLAN_GBP_USED_BITS;
}
goto bad_flags;
}
- md.vni = vxh->vx_vni;
- vs->rcv(vs, skb, &md);
+ md->vni = vxh->vx_vni;
+ vs->rcv(vs, skb, md);
return 0;
drop:
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
error:
+ if (tun_dst)
+ dst_release((struct dst_entry *)tun_dst);
+
/* Return non-VXLAN packet */
return 1;
}
int err = 0;
union vxlan_addr *remote_ip;
- vni = ntohl(md->vni) >> 8;
+ /* For flow-based devices, map all packets to VNI 0 */
+ if (vs->flags & VXLAN_F_FLOW_BASED)
+ vni = 0;
+ else
+ vni = ntohl(md->vni) >> 8;
+
/* Is this VNI defined? */
vxlan = vxlan_vs_find_vni(vs, vni);
if (!vxlan)
#endif
}
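+ /* Hand the metadata dst to the skb; ownership moves with it, so
+ * clear md->tun_dst to keep the drop path below from releasing it
+ * a second time.
+ */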
+ if (md->tun_dst) {
+ skb_dst_set(skb, (struct dst_entry *)md->tun_dst);
+ md->tun_dst = NULL;
+ }
+
if ((vxlan->flags & VXLAN_F_LEARN) &&
vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
goto drop;
skb_reset_network_header(skb);
- skb->mark = md->gbp;
+ /* In flow-based mode, GBP is carried in dst_metadata */
+ if (!(vs->flags & VXLAN_F_FLOW_BASED))
+ skb->mark = md->gbp;
if (oip6)
err = IP6_ECN_decapsulate(oip6, skb);
return;
drop:
+ if (md->tun_dst)
+ dst_release((struct dst_entry *)md->tun_dst);
+
/* Consume bad packet */
kfree_skb(skb);
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk = vxlan->vn_sock->sock->sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
struct flowi4 fl4;
union vxlan_addr *dst;
- struct vxlan_metadata md;
+ union vxlan_addr remote_ip;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
__u8 tos, ttl;
int err;
+ u32 flags = vxlan->flags;
- dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
- vni = rdst->remote_vni;
- dst = &rdst->remote_ip;
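+ /* rdst is set for ordinary FDB-driven transmit; for flow-based
+ * devices it is NULL and the destination comes from the per-skb
+ * tunnel info instead.
+ */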
+ if (rdst) {
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
+ vni = rdst->remote_vni;
+ dst = &rdst->remote_ip;
+ } else {
+ if (!info) {
+ WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+ dev->name);
+ goto drop;
+ }
+
+ dst_port = info->key.tp_dst ? : vxlan->dst_port;
+ vni = be64_to_cpu(info->key.tun_id);
+ remote_ip.sin.sin_family = AF_INET;
+ remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+ dst = &remote_ip;
+ }
if (vxlan_addr_any(dst)) {
if (did_rsc) {
vxlan->port_max, true);
if (dst->sa.sa_family == AF_INET) {
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+
+ ttl = info->key.ipv4_ttl;
+ tos = info->key.ipv4_tos;
+
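+ /* Prefer GBP metadata supplied with the tunnel key; without
+ * options, fall back to skb->mark below.
+ */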
+ if (info->options_len)
+ md = ip_tunnel_info_opts(info, sizeof(*md));
+ } else {
+ md->gbp = skb->mark;
+ }
+
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst->remote_ifindex;
+ fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
fl4.flowi4_tos = RT_TOS(tos);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
-
+ md->vni = htonl(vni << 8);
err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, &md,
+ src_port, dst_port, md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
- vxlan->flags);
+ flags);
if (err < 0) {
/* skb is already freed. */
skb = NULL;
u32 flags;
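+ /* Note: this patch converts only the IPv4 path to flow-based
+ * transmit; the IPv6 path below keeps its per-device configuration.
+ */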
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst->remote_ifindex;
+ fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
fl6.daddr = dst->sin6.sin6_addr;
fl6.saddr = vxlan->saddr.sin6.sin6_addr;
fl6.flowi6_mark = skb->mark;
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
+ md->vni = htonl(vni << 8);
+ md->gbp = skb->mark;
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
- 0, ttl, src_port, dst_port, &md,
+ 0, ttl, src_port, dst_port, md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
vxlan->flags);
#endif
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ const struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct ethhdr *eth;
bool did_rsc = false;
struct vxlan_rdst *rdst, *fdst = NULL;
#endif
}
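+ /* Flow-based transmit: the caller attached IP_TUNNEL_INFO_TX
+ * metadata to the skb, so send on that alone and skip the FDB
+ * lookup below.
+ */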
+ if (vxlan->flags & VXLAN_F_FLOW_BASED &&
+ info && info->mode == IP_TUNNEL_INFO_TX) {
+ vxlan_xmit_one(skb, dev, NULL, false);
+ return NETDEV_TX_OK;
+ }
+
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_FLOWBASED] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
if (data[IFLA_VXLAN_LIMIT])
vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
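+ /* New attribute: a single u8 toggles flow-based mode. A minimal
+ * userspace sketch (hypothetical libnl-style message construction,
+ * not part of this patch):
+ *
+ * nla_put_u8(msg, IFLA_VXLAN_FLOWBASED, 1);
+ *
+ * alongside the usual IFLA_VXLAN_ID and IFLA_VXLAN_PORT attributes.
+ */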
+ if (data[IFLA_VXLAN_FLOWBASED] &&
+ nla_get_u8(data[IFLA_VXLAN_FLOWBASED]))
+ vxlan->flags |= VXLAN_F_FLOW_BASED;
+
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_FLOWBASED */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
!!(vxlan->flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
!!(vxlan->flags & VXLAN_F_L3MISS)) ||
+ nla_put_u8(skb, IFLA_VXLAN_FLOWBASED,
+ !!(vxlan->flags & VXLAN_F_FLOW_BASED)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||