return (((u64) hash * range) >> 32) + vxlan->port_min;
}
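+/* Prepare an skb for encapsulated transmit: mark GSO packets as UDP
+ * tunnel GSO so the inner packet is segmented after encapsulation, and
+ * mark non-GSO packets without a pending hardware checksum as
+ * CHECKSUM_NONE.
+ */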
+static int handle_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 0;
+}
+
/* Transmit local packets over Vxlan
*
* Outer IP header inherits ECN and DF from inner header.
__u16 src_port;
__be16 df = 0;
__u8 tos, ttl;
- int err;
bool did_rsc = false;
const struct vxlan_fdb *f;
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
tunnel_ip_select_ident(skb, old_iph, &rt->dst);
- vxlan_set_owner(dev, skb);
+ nf_reset(skb);
- /* See iptunnel_xmit() */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
+ vxlan_set_owner(dev, skb);
- err = ip_local_out(skb);
- if (likely(net_xmit_eval(err) == 0)) {
- struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+ if (handle_offloads(skb))
+ goto drop;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
- }
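+ /* iptunnel_xmit() handles ip_local_out() and the tx stats update. */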
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
drop:
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
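+ /* Accept GSO skbs; handle_offloads() marks them for segmentation after encapsulation. */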
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
static __net_exit void vxlan_exit_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan;
+ unsigned h;
+
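+ /* Close all vxlan devices in this netns before the per-net socket is released. */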
+ rtnl_lock();
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
+ dev_close(vxlan->dev);
+ rtnl_unlock();
if (vn->sock) {
sk_release_kernel(vn->sock->sk);