#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:	the match extension
 * @target:	the target extension
 * @matchinfo:	per-match data
 * @targinfo:	per-target data
 * @net:	network namespace through which the action was invoked
 * @in:		input netdevice
 * @out:	output netdevice
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hook:	hook number given packet came from
 * @family:	Actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:	drop packet if we had inspection problems
 */
struct xt_action_param {
	const struct xt_match *match;
	const struct xt_target *target;
	const void *matchinfo, *targinfo;
	const struct net_device *in, *out;
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table into which the rule is being inserted
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	const void *entryinfo;
	const struct xt_match *match;
	unsigned int hook_mask;

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	const struct xt_match *match;

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields as above.
 */
struct xt_tgchk_param {
	const void *entryinfo;
	const struct xt_target *target;
	unsigned int hook_mask;

/* Target destructor parameters */
struct xt_tgdtor_param {
	const struct xt_target *target;
struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];

	/* Return true or false: return false and set par->hotdrop = true
	   to force an immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);

	/* Called when the userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	unsigned int matchsize;
	unsigned int compatsize;
	unsigned short proto;
	unsigned short family;
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);

	/* Called when the userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	unsigned int targetsize;
	unsigned int compatsize;
	unsigned short proto;
	unsigned short family;
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Number of entries: FIXME. --RR */
	unsigned int number;

	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;

	unsigned char entries[0] __aligned(8);
};
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);
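/*
 * Example: a minimal target extension, sketched for illustration only.
 * The "FOO"/foo_* names are hypothetical; a real extension lives in its
 * own module source file, not in this header.
 */
static unsigned int foo_tg(struct sk_buff *skb,
			   const struct xt_action_param *par)
{
	/* A target returns a netfilter verdict; XT_CONTINUE resumes rule traversal. */
	skb->mark = 0x1;	/* hypothetical action: set a fixed mark */
	return XT_CONTINUE;
}

static int foo_tg_check(const struct xt_tgchk_param *par)
{
	/* Reject rules placed in hooks this (hypothetical) target does not support. */
	if (par->hook_mask & ~((1 << NF_INET_LOCAL_OUT) |
			       (1 << NF_INET_POST_ROUTING)))
		return -EINVAL;
	return 0;
}

static struct xt_target foo_tg_reg = {
	.name       = "FOO",
	.revision   = 0,
	.family     = NFPROTO_IPV4,
	.target     = foo_tg,
	.checkentry = foo_tg_check,
	.targetsize = 0,
	.me         = THIS_MODULE,
};

/* Registration pairs up in the module's init/exit functions: */
static int __init foo_tg_init(void)
{
	return xt_register_target(&foo_tg_reg);
}

static void __exit foo_tg_exit(void)
{
	xt_unregister_target(&foo_tg_reg);
}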
int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
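/*
 * Example: a minimal match extension, again purely illustrative.  The
 * callbacks consume the xt_action_param/xt_mtchk_param structures
 * documented above; the "bar"/bar_* names are hypothetical.
 */
static bool bar_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	/* Match only head fragments; this sketch never requests a hotdrop. */
	return par->fragoff == 0;
}

static int bar_mt_check(const struct xt_mtchk_param *par)
{
	/* checkentry returns 0 on success or a negative errno. */
	if (par->hook_mask & (1 << NF_INET_FORWARD))
		return -EINVAL;	/* hypothetical restriction: not valid in FORWARD */
	return 0;
}

static struct xt_match bar_mt_reg = {
	.name       = "bar",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.match      = bar_mt,
	.checkentry = bar_mt_check,
	.matchsize  = 0,
	.me         = THIS_MODULE,
};

/* xt_register_match()/xt_unregister_match() pair up in module init/exit. */
static int __init bar_mt_init(void)
{
	return xt_register_match(&bar_mt_reg);
}

static void __exit bar_mt_exit(void)
{
	xt_unregister_match(&bar_mt_reg);
}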
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);
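/*
 * Example: how table code typically resolves an extension by name (sketch
 * only; the "bar" name and revision 0 are hypothetical, and error handling
 * is simplified).  The lookup pins the extension's module, so a real
 * caller keeps it until the rule is torn down rather than dropping it
 * immediately as shown here.
 */
static int example_lookup(void)
{
	struct xt_match *match;

	/* May request module autoload; returns ERR_PTR() on failure. */
	match = xt_request_find_match(NFPROTO_IPV4, "bar", 0);
	if (IS_ERR(match))
		return PTR_ERR(match);

	module_put(match->me);	/* release the reference taken by the lookup */
	return 0;
}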
/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * Low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;
/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 *
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value to avoid a test and conditional jump,
	 * since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);
	return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}
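/*
 * Example: how a table traversal brackets its counter updates with the
 * two helpers above (modelled on the ip_tables fast path; illustrative
 * only, and example_recseq_section() is a hypothetical name).
 */
static inline void example_recseq_section(void)
{
	unsigned int addend;

	local_bh_disable();	/* softirqs (and thus preemption) off across the section */
	addend = xt_write_recseq_begin();

	/* ... walk the rules and bump per-cpu counters here ... */

	xt_write_recseq_end(addend);
	local_bh_enable();
}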
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
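/*
 * Example: rule evaluation compares a device name against a masked
 * interface pattern.  A sketch only; the iniface/iniface_mask naming is
 * modelled on struct ipt_ip, and all three buffers must be IFNAMSIZ
 * bytes and suitably aligned, as assumed by the helper above.
 */
static inline bool example_iface_matches(const char *indev,
					 const char *iniface,
					 const char *iniface_mask)
{
	/* A non-zero return means at least one masked byte differed. */
	return ifname_compare_aligned(indev, iniface, iniface_mask) == 0;
}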
/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
 * real (percpu) counter. On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP. We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence caller must use IS_ERR_VALUE to check for error, this
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;

		return (u64) (__force unsigned long) res;
	}

	return 0;
}
static inline void xt_percpu_counter_free(u64 pcnt)
{
	if (nr_cpu_ids > 1)
		free_percpu((void __percpu *) (unsigned long) pcnt);
}

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

	return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

	return cnt;
}
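/*
 * Example: how the per-cpu counter helpers above are meant to be used
 * (sketch only; the example_* helper names are hypothetical).
 */
static inline int example_counter_setup(struct xt_counters *counters)
{
	u64 pcnt = xt_percpu_counter_alloc();

	/* As documented above, errors must be detected with IS_ERR_VALUE(). */
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	counters->pcnt = pcnt;	/* 0 on UP, per-cpu address on SMP */
	return 0;
}

static inline void example_counter_bump(struct xt_counters *counters,
					const struct sk_buff *skb)
{
	struct xt_counters *cnt = xt_get_this_cpu_counter(counters);

	ADD_COUNTER(*cnt, skb->len, 1);	/* bytes, packets */
}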
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
	u_int16_t match_size;
	char name[XT_FUNCTION_MAXNAMELEN - 1];
	u_int16_t match_size;
	u_int16_t match_size;
	unsigned char data[0];

struct compat_xt_entry_target {
	u_int16_t target_size;
	char name[XT_FUNCTION_MAXNAMELEN - 1];
	u_int16_t target_size;
	compat_uptr_t target;
	u_int16_t target_size;
	unsigned char data[0];

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

struct _compat_xt_align {

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */