1 /* Event cache for netfilter. */
3 /* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/types.h>
13 #include <linux/netfilter.h>
14 #include <linux/skbuff.h>
15 #include <linux/vmalloc.h>
16 #include <linux/stddef.h>
17 #include <linux/err.h>
18 #include <linux/percpu.h>
19 #include <linux/kernel.h>
20 #include <linux/netdevice.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_core.h>
26 #include <net/netfilter/nf_conntrack_extend.h>
/* Serializes registration/unregistration of event notifiers below. */
static DEFINE_MUTEX(nf_ct_ecache_mutex);
30 /* deliver cached events and clear cache entry - must be called with locally
31 * disabled softirqs */
32 void nf_ct_deliver_cached_events(struct nf_conn *ct)
34 struct net *net = nf_ct_net(ct);
35 unsigned long events, missed;
36 struct nf_ct_event_notifier *notify;
37 struct nf_conntrack_ecache *e;
38 struct nf_ct_event item;
42 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
46 e = nf_ct_ecache_find(ct);
50 events = xchg(&e->cache, 0);
52 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
55 /* We make a copy of the missed event cache without taking
56 * the lock, thus we may send missed events twice. However,
57 * this does not harm and it happens very rarely. */
60 if (!((events | missed) & e->ctmask))
67 ret = notify->fcn(events | missed, &item);
69 if (likely(ret >= 0 && !missed))
72 spin_lock_bh(&ct->lock);
77 spin_unlock_bh(&ct->lock);
82 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
84 int nf_conntrack_register_notifier(struct net *net,
85 struct nf_ct_event_notifier *new)
88 struct nf_ct_event_notifier *notify;
90 mutex_lock(&nf_ct_ecache_mutex);
91 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
92 lockdep_is_held(&nf_ct_ecache_mutex));
97 rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
98 mutex_unlock(&nf_ct_ecache_mutex);
102 mutex_unlock(&nf_ct_ecache_mutex);
105 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
107 void nf_conntrack_unregister_notifier(struct net *net,
108 struct nf_ct_event_notifier *new)
110 struct nf_ct_event_notifier *notify;
112 mutex_lock(&nf_ct_ecache_mutex);
113 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
114 lockdep_is_held(&nf_ct_ecache_mutex));
115 BUG_ON(notify != new);
116 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
117 mutex_unlock(&nf_ct_ecache_mutex);
119 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
121 int nf_ct_expect_register_notifier(struct net *net,
122 struct nf_exp_event_notifier *new)
125 struct nf_exp_event_notifier *notify;
127 mutex_lock(&nf_ct_ecache_mutex);
128 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
129 lockdep_is_held(&nf_ct_ecache_mutex));
130 if (notify != NULL) {
134 rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
135 mutex_unlock(&nf_ct_ecache_mutex);
139 mutex_unlock(&nf_ct_ecache_mutex);
142 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
144 void nf_ct_expect_unregister_notifier(struct net *net,
145 struct nf_exp_event_notifier *new)
147 struct nf_exp_event_notifier *notify;
149 mutex_lock(&nf_ct_ecache_mutex);
150 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
151 lockdep_is_held(&nf_ct_ecache_mutex));
152 BUG_ON(notify != new);
153 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
154 mutex_unlock(&nf_ct_ecache_mutex);
156 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
/* Boot-time defaults for the per-netns sysctls initialized in
 * nf_conntrack_ecache_init() below. */
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
#ifdef CONFIG_SYSCTL
/* Template sysctl table; per-netns copies point .data at the netns fields. */
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */
182 static struct nf_ct_ext_type event_extend __read_mostly = {
183 .len = sizeof(struct nf_conntrack_ecache),
184 .align = __alignof__(struct nf_conntrack_ecache),
185 .id = NF_CT_EXT_ECACHE,
#ifdef CONFIG_SYSCTL
/* Duplicate the sysctl template for this netns and register it.
 * Returns 0 on success or -ENOMEM on allocation/registration failure. */
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	/* Point the copy at this netns' variables, not init_net's. */
	table[0].data = &net->ct.sysctl_events;
	table[1].data = &net->ct.sysctl_events_retry_timeout;

	net->ct.event_sysctl_header =
		register_net_sysctl_table(net,
					  nf_net_netfilter_sysctl_path, table);
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

/* Unregister and free this netns' sysctl table copy. */
static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else /* !CONFIG_SYSCTL */
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
235 int nf_conntrack_ecache_init(struct net *net)
239 net->ct.sysctl_events = nf_ct_events;
240 net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
242 if (net_eq(net, &init_net)) {
243 ret = nf_ct_extend_register(&event_extend);
245 printk(KERN_ERR "nf_ct_event: Unable to register "
246 "event extension.\n");
247 goto out_extend_register;
251 ret = nf_conntrack_event_init_sysctl(net);
258 if (net_eq(net, &init_net))
259 nf_ct_extend_unregister(&event_extend);
264 void nf_conntrack_ecache_fini(struct net *net)
266 nf_conntrack_event_fini_sysctl(net);
267 if (net_eq(net, &init_net))
268 nf_ct_extend_unregister(&event_extend);