2 * Connection state tracking for netfilter. This is separated from,
3 * but required by, the (future) NAT layer; it can also be used by an iptables
6 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
7 * - generalize L3 protocol dependent part.
 * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
12 #ifndef _NF_CONNTRACK_H
13 #define _NF_CONNTRACK_H
15 #include <linux/netfilter/nf_conntrack_common.h>
18 #include <linux/bitops.h>
19 #include <linux/compiler.h>
20 #include <asm/atomic.h>
22 #include <linux/netfilter/nf_conntrack_tcp.h>
23 #include <linux/netfilter/nf_conntrack_sctp.h>
24 #include <net/netfilter/ipv4/nf_conntrack_icmp.h>
25 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
27 #include <net/netfilter/nf_conntrack_tuple.h>
/* per conntrack: protocol private data */
union nf_conntrack_proto {
	/* insert conntrack proto private data here */
	struct ip_ct_sctp sctp;		/* SCTP state (linux/netfilter/nf_conntrack_sctp.h) */
	struct ip_ct_icmp icmp;		/* ICMP state (net/netfilter/ipv4/nf_conntrack_icmp.h) */
	struct nf_ct_icmpv6 icmpv6;	/* ICMPv6 state (net/netfilter/ipv6/nf_conntrack_icmpv6.h) */
/* per expectation: protocol private data */
union nf_conntrack_expect_proto {
	/* insert expect proto private data here */
42 /* Add protocol helper include file here */
43 #include <linux/netfilter/nf_conntrack_ftp.h>
/* per conntrack: application helper private data */
union nf_conntrack_help {
	/* insert conntrack helper private data (master) here */
	struct ip_ct_ftp_master ct_ftp_info;	/* FTP helper state (linux/netfilter/nf_conntrack_ftp.h) */
51 #include <linux/types.h>
52 #include <linux/skbuff.h>
#ifdef CONFIG_NETFILTER_DEBUG
/* Debug-only assertion: prints file/line/function via printk when it
 * fires.  The second definition below expands to nothing. */
#define NF_CT_ASSERT(x) \
	/* Wooah! I'm tripping my conntrack in a frenzy of \
	printk("NF_CT_ASSERT: %s:%i(%s)\n", \
	       __FILE__, __LINE__, __FUNCTION__); \
#define NF_CT_ASSERT(x)
struct nf_conntrack_helper;

/* nf_conn feature for connections that have a helper */
	/* Protocol helper attached to this connection */
	struct nf_conntrack_helper *helper;

	/* Helper-private per-connection data (see union nf_conntrack_help) */
	union nf_conntrack_help help;

	/* Current number of expected connections */
	unsigned int expecting;
81 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
	   plus 1 for any connection(s) we are `master' for */
	struct nf_conntrack ct_general;

	/* XXX should I move this to the tail ? - Y.K */
	/* These are my tuples; original and reply */
	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];

	/* Have we seen traffic both ways yet? (bitset) */

	/* If we were expected by an expectation, this will be it */
	struct nf_conn *master;

	/* Timer function; drops refcnt when it goes off. */
	struct timer_list timeout;

#ifdef CONFIG_NF_CT_ACCT
	/* Accounting Information (same cache line as other written members) */
	struct ip_conntrack_counter counters[IP_CT_DIR_MAX];

	/* Unique ID that identifies this conntrack */

	/* features - nat, helper, ... used by allocating system */

#if defined(CONFIG_NF_CONNTRACK_MARK)
#ifdef CONFIG_NF_CONNTRACK_SECMARK

	/* Storage reserved for other modules: */
	union nf_conntrack_proto proto;

	/* features dynamically at the end: helper, nat (both optional) */
/* Map a tuple-hash entry back to the conntrack that embeds it;
 * hash->tuple.dst.dir selects which tuplehash[] element this is. */
static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
	return container_of(hash, struct nf_conn,
			    tuplehash[hash->tuple.dst.dir]);
/* get master conntrack via master expectation */
/* Parenthesize the macro argument so that non-trivial expressions
 * (casts, conditional expressions, ...) expand correctly. */
#define master_ct(conntr) ((conntr)->master)
/* Alter reply tuple (maybe alter helper). */
nf_conntrack_alter_reply(struct nf_conn *conntrack,
			 const struct nf_conntrack_tuple *newreply);

/* Is this tuple taken? (ignoring any belonging to the given
   conntrack) */
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack);
/* Return conntrack_info and tuple hash for given skb.
 * @ctinfo is an out-parameter copied from skb->nfctinfo; the return
 * value is skb->nfct cast to its concrete type. */
static inline struct nf_conn *
nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
	*ctinfo = skb->nfctinfo;
	return (struct nf_conn *)skb->nfct;
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
	/* Release one reference on the embedded nf_conntrack. */
	nf_conntrack_put(&ct->ct_general);
/* Protocol module loading */
extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
extern void nf_ct_l3proto_module_put(unsigned short l3proto);

/* Raw hash lookup; entries belonging to @ignored_conntrack are
 * skipped during the search. */
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack);

extern void nf_conntrack_hash_insert(struct nf_conn *ct);

/* Flush the conntrack table. */
extern void nf_conntrack_flush(void);

/* Find a helper matching @tuple; release with nf_ct_helper_put(). */
extern struct nf_conntrack_helper *
nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple);
extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);

extern struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name);

/* Compute the inverse of @orig into @inverse (protocol-aware). */
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
				const struct nf_conntrack_tuple *orig);

/* Backend for the nf_ct_refresh{,_acct}() wrappers below. */
extern void __nf_ct_refresh_acct(struct nf_conn *ct,
				 enum ip_conntrack_info ctinfo,
				 const struct sk_buff *skb,
				 unsigned long extra_jiffies,
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      unsigned long extra_jiffies)
	/* final argument 1 = update accounting */
	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
				 const struct sk_buff *skb,
				 unsigned long extra_jiffies)
	/* ctinfo unused here (0); final argument 0 = skip accounting */
	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
/* These are for NAT. Icky. */
/* Update TCP window tracking data when NAT mangles the packet */
extern void nf_conntrack_tcp_update(struct sk_buff *skb,
				    unsigned int dataoff,
				    struct nf_conn *conntrack,

/* Call me when a conntrack is destroyed. */
extern void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);

/* Fake conntrack entry for untracked connections */
extern struct nf_conn nf_conntrack_untracked;

extern int nf_ct_no_defrag;

/* Iterate over all conntracks: if iter returns true, it's deleted. */
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data);

/* Free a conntrack obtained from nf_conntrack_alloc(). */
extern void nf_conntrack_free(struct nf_conn *ct);

/* Allocate a conntrack for the given original/reply tuple pair. */
extern struct nf_conn *
nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_tuple *repl);
/* It's confirmed if it is, or has been, in the hash table. */
static inline int nf_ct_is_confirmed(struct nf_conn *ct)
	return test_bit(IPS_CONFIRMED_BIT, &ct->status);
/* Is the IPS_DYING status bit set on this conntrack? */
static inline int nf_ct_is_dying(struct nf_conn *ct)
	return test_bit(IPS_DYING_BIT, &ct->status);
extern unsigned int nf_conntrack_htable_size;	/* hash table size (buckets) */
extern int nf_conntrack_checksum;

/* Bump one field of this cpu's conntrack statistics. */
#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)

#ifdef CONFIG_NF_CONNTRACK_EVENTS
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <net/netfilter/nf_conntrack_expect.h>

/* Per-cpu cache of pending (not yet delivered) conntrack events. */
struct nf_conntrack_ecache {

DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);

/* Access a member of this cpu's event cache. */
#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x)
/* Notifier chains for conntrack and expectation events */
extern struct atomic_notifier_head nf_conntrack_chain;
extern struct atomic_notifier_head nf_conntrack_expect_chain;

/* Subscribe to conntrack events. */
static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);

/* Unsubscribe from conntrack events. */
static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);

/* Subscribe to expectation events. */
nf_conntrack_expect_register_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);

/* Unsubscribe from expectation events. */
nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
extern void __nf_ct_event_cache_init(struct nf_conn *ct);

/* Record @event for the conntrack attached to @skb in this cpu's
 * event cache; delivery happens later (see
 * nf_ct_deliver_cached_events above). */
nf_conntrack_event_cache(enum ip_conntrack_events event,
			 const struct sk_buff *skb)
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
	struct nf_conntrack_ecache *ecache;

	ecache = &__get_cpu_var(nf_conntrack_ecache);
	/* Cache holds events for a different conntrack:
	 * (re)initialize it for @ct before accumulating. */
	if (ct != ecache->ct)
		__nf_ct_event_cache_init(ct);
	ecache->events |= event;	/* events accumulates as a bitmask */
/* Deliver @event immediately, but only for confirmed conntracks that
 * are not being torn down. */
static inline void nf_conntrack_event(enum ip_conntrack_events event,
	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
		atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);

/* Deliver an expectation event immediately. */
nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
			  struct nf_conntrack_expect *exp)
	atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
#else /* CONFIG_NF_CONNTRACK_EVENTS */
/* Event support compiled out: every notification is a no-op. */
static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
					    const struct sk_buff *skb) {}
static inline void nf_conntrack_event(enum ip_conntrack_events event,
				      struct nf_conn *ct) {}
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
			  struct nf_conntrack_expect *exp) {}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
/* no helper, no nat */
#define NF_CT_F_BASIC	0
/* connection has a helper attached */
#define NF_CT_F_HELP	1
/* connection has NAT state attached */
#define NF_CT_F_NAT	2
/* number of distinct feature combinations (two feature bits) */
#define NF_CT_F_NUM	4

/* Register/unregister a conntrack slab cache for the given feature
 * combination. */
nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size);
nf_conntrack_unregister_cache(u_int32_t features);
/* valid combinations:
 * basic: nf_conn, nf_conn .. nf_conn_help
 * nat: nf_conn .. nf_conn_nat, nf_conn .. nf_conn_nat, nf_conn help
 */
/* Return the per-connection helper area laid out directly after the
 * base struct nf_conn when the NF_CT_F_HELP feature is set. */
static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
	unsigned int offset = sizeof(struct nf_conn);

	/* no helper feature -> no helper area was allocated */
	if (!(ct->features & NF_CT_F_HELP))

	return (struct nf_conn_help *) ((void *)ct + offset);
355 #endif /* __KERNEL__ */
356 #endif /* _NF_CONNTRACK_H */