/*
 * xfrm4_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
/* Resolve an IPv4 route for the given tos/daddr pair; saddr may be NULL. */
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
					  xfrm_address_t *saddr,
					  xfrm_address_t *daddr)
{
	struct flowi fl = {
		.fl4_dst = daddr->a4,
		.fl4_tos = tos,
	};
	struct dst_entry *dst;
	struct rtable *rt;
	int err;

	if (saddr)
		fl.fl4_src = saddr->a4;

	err = __ip_route_output_key(net, &rt, &fl);
	dst = &rt->dst;
	if (err)
		dst = ERR_PTR(err);
	return dst;
}
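/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): the lookup reports failure through ERR_PTR() rather than NULL, so
 * a caller tests the result with IS_ERR():
 *
 *	dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */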
static int xfrm4_get_saddr(struct net *net,
			   xfrm_address_t *saddr, xfrm_address_t *daddr)
{
	struct dst_entry *dst;
	struct rtable *rt;

	dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
	if (IS_ERR(dst))
		return -EHOSTUNREACH;

	/* Borrow the preferred source address from the route, then drop
	 * the reference taken by the lookup. */
	rt = (struct rtable *)dst;
	saddr->a4 = rt->rt_src;
	dst_release(dst);
	return 0;
}
static int xfrm4_get_tos(struct flowi *fl)
{
	return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
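/*
 * Worked example (mask value assumed from the IPv4 header definitions,
 * where IPTOS_RT_MASK is IPTOS_TOS_MASK with the two low-order ECN bits
 * cleared): a tos byte of 0x1b (TOS bits 0x18 plus ECN bits 0x03) masks
 * down to 0x18, so ECN marking never influences route selection.
 */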
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;

	xdst->u.rt.fl = *fl;

	xdst->u.dst.dev = dev;
	dev_hold(dev);

	xdst->u.rt.peer = rt->peer;
	if (rt->peer)
		atomic_inc(&rt->peer->refcnt);

	/* This copy was apparently lost at some point, so the routing
	 * state duplicated below still needs an audit. */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_src = rt->rt_src;
	xdst->u.rt.rt_dst = rt->rt_dst;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;

	return 0;
}
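/*
 * Reference-count sketch: each reference taken in xfrm4_fill_dst() has a
 * matching release -- the dev_hold() is undone by the generic dst teardown
 * when the entry dies, and the inet_peer reference taken above is dropped
 * by inet_putpeer() in xfrm4_dst_destroy() below.
 */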
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;

	memset(fl, 0, sizeof(struct flowi));
	fl->mark = skb->mark;

	/* Only look at the transport header if the packet is not a fragment. */
	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports = (__be16 *)xprth;

				fl->fl_ip_sport = ports[!!reverse];
				fl->fl_ip_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp = xprth;

				fl->fl_icmp_type = icmp[0];
				fl->fl_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr = (__be16 *)xprth;

				fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags = (__be16 *)xprth;
				__be32 *gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl->fl_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl->fl_ipsec_spi = 0;
			break;
		}
	}
	fl->proto = iph->protocol;
	fl->fl4_dst = reverse ? iph->saddr : iph->daddr;
	fl->fl4_src = reverse ? iph->daddr : iph->saddr;
	fl->fl4_tos = iph->tos;
}
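/*
 * Worked example of the ports[!!reverse] indexing above: the first two
 * 16-bit words of the TCP/UDP/UDPLITE/SCTP/DCCP headers are source port
 * then destination port.  With reverse == 0, sport = ports[0] and
 * dport = ports[1]; with reverse != 0 the indices swap, yielding the
 * flow as seen from the opposite direction.
 */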
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm4_policy_afinfo.garbage_collect(net);
	return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
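/*
 * Sizing sketch: the return value doubles as an allocation veto.  For
 * example, with gc_thresh at 1024, garbage collection starts being
 * attempted once more than 1024 xfrm dst entries are cached, and once
 * more than 2048 are in use this function returns nonzero and new
 * allocations are refused.
 */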
static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct dst_entry *path = xdst->route;

	path->ops->update_pmtu(path, mtu);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	if (likely(xdst->u.rt.peer))
		inet_putpeer(xdst->u.rt.peer);
	xfrm_dst_destroy(xdst);
}
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	if (!unregister)
		return;

	xfrm_dst_ifdown(dst, dev);
}
static struct dst_ops xfrm4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		1024,
};
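/*
 * Note on the .gc hook: the core dst allocator consults gc_thresh before
 * handing out a new entry, so xfrm4_garbage_collect() is invoked once the
 * number of cached xfrm dst entries climbs past gc_thresh.  The static
 * 1024 here is only a placeholder; xfrm4_init() below recomputes it from
 * the route table size.
 */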
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family =		AF_INET,
	.dst_ops =		&xfrm4_dst_ops,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname	= "xfrm4_gc_thresh",
		.data		= &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *sysctl_hdr;
#endif
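/*
 * Usage sketch: since the table is registered under the ipv4 sysctl path
 * in xfrm4_init() below, an administrator can retune the threshold at
 * runtime, e.g.
 *
 *	echo 65536 > /proc/sys/net/ipv4/xfrm4_gc_thresh
 */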
static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}
static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
	if (sysctl_hdr)
		unregister_net_sysctl_table(sysctl_hdr);
#endif
	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
void __init xfrm4_init(int rt_max_size)
{
	/*
	 * Select a default value for the gc_thresh based on the main route
	 * table hash size.  The worst case scenario is IPsec operating in
	 * transport mode, where we create a dst_entry per socket.  The xfrm
	 * gc algorithm starts trying to remove entries at gc_thresh and
	 * refuses new allocations above 2*gc_thresh, so set the initial
	 * xfrm gc_thresh value to rt_max_size/2.  That lets us store one
	 * IPsec connection per route table entry and start cleaning when
	 * we are half full.
	 */
	xfrm4_dst_ops.gc_thresh = rt_max_size/2;
	dst_entries_init(&xfrm4_dst_ops);

	xfrm4_state_init();
	xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
					       xfrm4_policy_table);
#endif
}
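/*
 * Worked example of the sizing comment above (the rt_max_size figure is
 * assumed for illustration): with a routing cache sized at
 * rt_max_size = 1048576 entries, gc_thresh defaults to 524288.  Garbage
 * collection then starts at 524288 cached xfrm dst entries, and new
 * allocations are refused beyond 1048576 -- one per route table entry.
 */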