/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#ifdef CONFIG_OPENVSWITCH_GRE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/if_vlan.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>

#include "datapath.h"
#include "vport.h"
51 /* Returns the least-significant 32 bits of a __be64. */
52 static __be32 be64_get_low32(__be64 x)
55 return (__force __be32)x;
57 return (__force __be32)((__force u64)x >> 32);
61 static __be16 filter_tnl_flags(__be16 flags)
63 return flags & (TUNNEL_CSUM | TUNNEL_KEY);
66 static struct sk_buff *__build_header(struct sk_buff *skb,
69 const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
70 struct tnl_ptk_info tpi;
72 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
76 tpi.flags = filter_tnl_flags(tun_key->tun_flags);
77 tpi.proto = htons(ETH_P_TEB);
78 tpi.key = be64_get_low32(tun_key->tun_id);
80 gre_build_header(skb, &tpi, tunnel_hlen);
85 static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
88 return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
90 return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
94 /* Called with rcu_read_lock and BH disabled. */
95 static int gre_rcv(struct sk_buff *skb,
96 const struct tnl_ptk_info *tpi)
98 struct ovs_key_ipv4_tunnel tun_key;
99 struct ovs_net *ovs_net;
103 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
104 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
105 if (unlikely(!vport))
106 return PACKET_REJECT;
108 key = key_to_tunnel_id(tpi->key, tpi->seq);
109 ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
110 filter_tnl_flags(tpi->flags));
112 ovs_vport_receive(vport, skb, &tun_key);
116 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
118 struct net *net = ovs_dp_get_net(vport->dp);
126 if (unlikely(!OVS_CB(skb)->tun_key)) {
132 memset(&fl, 0, sizeof(fl));
133 fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
134 fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
135 fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
136 fl.flowi4_mark = skb->mark;
137 fl.flowi4_proto = IPPROTO_GRE;
139 rt = ip_route_output_key(net, &fl);
143 tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);
145 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
146 + tunnel_hlen + sizeof(struct iphdr)
147 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
148 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
149 int head_delta = SKB_DATA_ALIGN(min_headroom -
152 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
158 if (vlan_tx_tag_present(skb)) {
159 if (unlikely(!__vlan_put_tag(skb,
161 vlan_tx_tag_get(skb)))) {
168 /* Push Tunnel header. */
169 skb = __build_header(skb, tunnel_hlen);
170 if (unlikely(!skb)) {
175 df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
180 return iptunnel_xmit(net, rt, skb, fl.saddr,
181 OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
182 OVS_CB(skb)->tun_key->ipv4_tos,
183 OVS_CB(skb)->tun_key->ipv4_ttl, df);
190 static struct gre_cisco_protocol gre_protocol = {
195 static int gre_ports;
196 static int gre_init(void)
204 err = gre_cisco_register(&gre_protocol);
206 pr_warn("cannot register gre protocol handler\n");
211 static void gre_exit(void)
217 gre_cisco_unregister(&gre_protocol);
/* Returns the vport's name, stored in its private area by gre_create(). */
static const char *gre_get_name(const struct vport *vport)
{
	return vport_priv(vport);
}
225 static struct vport *gre_create(const struct vport_parms *parms)
227 struct net *net = ovs_dp_get_net(parms->dp);
228 struct ovs_net *ovs_net;
236 ovs_net = net_generic(net, ovs_net_id);
237 if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
238 vport = ERR_PTR(-EEXIST);
242 vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
246 strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
247 rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
255 static void gre_tnl_destroy(struct vport *vport)
257 struct net *net = ovs_dp_get_net(vport->dp);
258 struct ovs_net *ovs_net;
260 ovs_net = net_generic(net, ovs_net_id);
262 rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
263 ovs_vport_deferred_free(vport);
267 const struct vport_ops ovs_gre_vport_ops = {
268 .type = OVS_VPORT_TYPE_GRE,
269 .create = gre_create,
270 .destroy = gre_tnl_destroy,
271 .get_name = gre_get_name,
272 .send = gre_tnl_send,
275 #endif /* OPENVSWITCH_GRE */