/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
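
/* Note on the FWINV() helper used below (explanatory, not in the original
 * source): it XORs a comparison result with the corresponding inversion
 * flag, so a rule like "-s ! 10.0.0.0/8" reuses the same address test and
 * simply flips its outcome when IPT_INV_SRCIP is set in ipinfo->invflags.
 */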
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n", ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n", ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
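
/* Illustrative note (not in the original source): with the TRACE target
 * enabled, each traced rule produces a log line of the form
 *	TRACE: filter:INPUT:rule:2
 * i.e. table:chain:comment:rulenum, where the comment is "rule", "return"
 * or "policy" as resolved by get_chainname_rulenum() above.
 */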
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries;
	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u), UF %p\n",
		 table->name, hook,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--stackidx];
					pr_debug("Pulled %p out from pos %u\n",
						 e, stackidx);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				jumpstack[stackidx++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, stackidx - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
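
/* Explanatory note (not in the original source): standard-target verdicts
 * encode absolute verdicts as negative numbers, which is why the loop above
 * computes "(unsigned int)(-v) - 1": v == -1 yields NF_DROP (0) and v == -2
 * yields NF_ACCEPT (1). Non-negative verdicts are byte offsets into the
 * table blob and are followed as jumps via get_entry().
 */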
static bool find_jump_target(const struct xt_table_info *t,
			     const struct ipt_entry *target)
{
	struct ipt_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		if (iter == target)
			return true;
	}
	return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset on hook %u rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
					e = (struct ipt_entry *)
						(entry0 + newpos);
					if (!find_jump_target(newinfo, e))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e)
{
	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	return xt_check_entry_offsets(e, e->target_offset, e->next_offset);
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);
	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
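
/* Explanatory note (not in the original source): an "underflow" is the rule
 * a built-in chain falls through to, i.e. its policy. iptables emits one
 * for e.g. "iptables -P INPUT DROP", and the "-verdict - 1" decoding above
 * must resolve to NF_DROP or NF_ACCEPT for the blob to be accepted.
 */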
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
}
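
/* Explanatory note (not in the original source): the blob validated above
 * is laid out by userspace as a struct ipt_replace header followed by
 * variable-length struct ipt_entry records placed back to back; the
 * hook_entry[] and underflow[] arrays hold byte offsets into that blob,
 * which is why all the checks here operate on offsets, not pointers.
 */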
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
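
/* Explanatory note (not in the original source): counters are kept per CPU
 * and updated on the packet path under xt_write_recseq_begin()/end(). The
 * seqcount retry loop in get_counters() re-reads a CPU's bcnt/pcnt pair
 * until it observes a consistent snapshot, avoiding a lock on the hot path.
 */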
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
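
/* Explanatory note (not in the original source): jump verdicts are byte
 * offsets, and compat (32-bit) entries are smaller than native ones, so a
 * jump target shifts when a ruleset is converted. xt_compat_calc_jump()
 * returns the accumulated size delta for a given offset; it is added when
 * going compat -> native and subtracted on the way back.
 */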
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
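
/* Explanatory note (not in the original source): IPT_SO_SET_REPLACE always
 * swaps in a complete table, never a single rule. Userspace iptables reads
 * the current table, edits it, and submits the whole blob; the old counter
 * values collected by __do_replace() are handed back through tmp.counters.
 */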
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, num_counters, addend;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	const char *name;
	int size, ret = 0;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry entries[0];
};
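
/* Explanatory note (not in the original source): this mirrors struct
 * ipt_replace except that the trailing entries use compat_ipt_entry and
 * the counters pointer is a compat_uptr_t, since pointer size differs
 * between a 32-bit userspace and a 64-bit kernel.
 */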
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	j = 0;
	mtpar.net	= net;
	mtpar.table	= name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family	= NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	xt_percpu_counter_free(e->counters.pcnt);
	return ret;
}
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk,	int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;
	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;
	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}
	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;
	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;
	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;
	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}
	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *ops, struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ipt_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
void ipt_unregister_table(struct net *net, struct xt_table *table,
			  const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ipt_unregister_table(net, table);
}
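
/* Usage sketch (illustrative, not in this file): table modules such as
 * iptable_filter build an initial blob with ipt_alloc_initial_table(),
 * call ipt_register_table(net, &packet_filter, repl, ops, &res) from
 * their pernet init, and call ipt_unregister_table() on exit.
 */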
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
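
/* Example (explanatory, not in the original source): "-p icmp --icmp-type 8"
 * is stored as test_type == 8 with a code range spanning 0..255, while an
 * unspecified type is stored as the 0xFF wildcard, which matches any ICMP
 * type regardless of code.
 */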
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);