5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is stongly inspired on the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 #include <linux/kmod.h>
20 #include <linux/module.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netfilter/x_tables.h>
23 #include <linux/netfilter_bridge/ebtables.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <asm/uaccess.h>
28 #include <linux/smp.h>
29 #include <linux/cpumask.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
34 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
35 "report to author: "format, ## args)
36 /* #define BUGPRINT(format, args...) */
39 * Each cpu has its own set of counters, so there is no need for write_lock in
41 * For reading or updating the counters, the user context needs to
45 /* The size of each set of counters is altered to get cache alignment */
46 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
47 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
48 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
49 COUNTER_OFFSET(n) * cpu))
53 static DEFINE_MUTEX(ebt_mutex);
/*
 * 32-bit compat translation of the standard target's verdict.
 * NOTE(review): this listing is missing interleaved lines (braces and,
 * presumably, CONFIG_COMPAT guards) -- confirm against the full source.
 */
/* Translate a compat verdict from userspace: positive verdicts are chain
 * offsets and must be adjusted by the compat jump delta. */
56 static void ebt_standard_compat_from_user(void *dst, const void *src)
58 int v = *(compat_int_t *)src;
61 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 memcpy(dst, &v, sizeof(v));
/* Inverse direction: subtract the jump delta before copying the verdict
 * back to 32-bit userspace.  Returns 0 or -EFAULT. */
65 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
67 compat_int_t cv = *(int *)src;
70 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* The built-in standard target (ACCEPT/DROP/CONTINUE/RETURN/jump).
 * Registered with x_tables; the .name/.revision fields are not visible
 * in this listing. */
76 static struct xt_target ebt_standard_target = {
79 .family = NFPROTO_BRIDGE,
80 .targetsize = sizeof(int),
82 .compatsize = sizeof(compat_int_t),
83 .compat_from_user = ebt_standard_compat_from_user,
84 .compat_to_user = ebt_standard_compat_to_user,
/*
 * Per-packet helpers.  NOTE(review): interior lines (returns, declarations,
 * masks) are elided in this listing -- verify against the full file.
 */
/* Run one watcher extension on skb; watchers observe only, their return
 * value is ignored (no verdict). */
89 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
90 struct xt_target_param *par)
92 par->target = w->u.watcher;
93 par->targinfo = w->data;
94 w->u.watcher->target(skb, par);
95 /* watchers don't give a verdict */
/* Run one match extension; maps the xt bool result onto EBT_MATCH /
 * EBT_NOMATCH for EBT_MATCH_ITERATE. */
99 static inline int ebt_do_match (struct ebt_entry_match *m,
100 const struct sk_buff *skb, struct xt_match_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
/* Compare a rule's interface name against a device name.  A byte value
 * of 1 in the rule acts as a trailing wildcard.  Returns nonzero on
 * mismatch (i.e. 0 means "matches"). */
108 ebt_dev_check(const char *entry, const struct net_device *device)
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 return (devname[i] != entry[i] && entry[i] != 1);
/* XOR the raw test with the rule's per-field invert flag. */
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
/* Check the built-in matches of an entry: protocol (802.3 vs. ethertype),
 * in/out device, logical bridge in/out device, and source/dest MAC with
 * mask.  Returns nonzero when the entry does NOT match (callers skip). */
127 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
132 if (e->bitmask & EBT_802_3) {
/* 1536 = 0x600: smaller values are 802.3 length fields, not ethertypes */
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
135 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
/* logical-dev checks apply only when the port is attached to a bridge */
143 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
144 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
146 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
147 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
150 if (e->bitmask & EBT_SOURCEMAC) {
152 for (i = 0; i < 6; i++)
/* masked byte-wise MAC compare; mask operand elided in this listing */
153 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
155 if (FWINV2(verdict != 0, EBT_ISOURCE) )
158 if (e->bitmask & EBT_DESTMAC) {
160 for (i = 0; i < 6; i++)
161 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
163 if (FWINV2(verdict != 0, EBT_IDEST) )
/* Step to the next entry using the entry's self-describing offset. */
170 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
172 return (void *)entry + entry->next_offset;
175 /* Do some firewalling */
/*
 * Main packet-filtering loop: walk the chain for the given hook, run
 * matches/watchers/target per entry, follow jumps via a per-cpu chain
 * stack, and return the final verdict.  Runs under the table read lock.
 * NOTE(review): several interior lines (gotos, stack push/pop arithmetic)
 * are elided in this listing -- confirm control flow against the full file.
 */
176 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
177 const struct net_device *in, const struct net_device *out,
178 struct ebt_table *table)
181 struct ebt_entry *point;
182 struct ebt_counter *counter_base, *cb_base;
183 const struct ebt_entry_target *t;
185 struct ebt_chainstack *cs;
186 struct ebt_entries *chaininfo;
188 const struct ebt_table_info *private;
189 bool hotdrop = false;
190 struct xt_match_param mtpar;
191 struct xt_target_param tgpar;
/* shared parameter blocks passed to every match/watcher/target call */
193 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
194 mtpar.in = tgpar.in = in;
195 mtpar.out = tgpar.out = out;
196 mtpar.hotdrop = &hotdrop;
197 mtpar.hooknum = tgpar.hooknum = hook;
199 read_lock_bh(&table->lock);
200 private = table->private;
/* per-cpu counter slab; the cpu-id argument is elided in this listing */
201 cb_base = COUNTER_BASE(private->counters, private->nentries,
203 if (private->chainstack)
204 cs = private->chainstack[smp_processor_id()];
207 chaininfo = private->hook_entry[hook];
208 nentries = private->hook_entry[hook]->nentries;
209 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
210 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
211 /* base for chain jumps */
212 base = private->entries;
214 while (i < nentries) {
215 if (ebt_basic_match(point, eth_hdr(skb), in, out))
218 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
/* a match set hotdrop (presumably checked just above): drop now */
221 read_unlock_bh(&table->lock);
225 /* increase counter */
226 (*(counter_base + i)).pcnt++;
227 (*(counter_base + i)).bcnt += skb->len;
229 /* these should only watch: not modify, nor tell us
230 what to do with the packet */
231 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);
233 t = (struct ebt_entry_target *)
234 (((char *)point) + point->target_offset);
235 /* standard target */
236 if (!t->u.target->target)
237 verdict = ((struct ebt_standard_target *)t)->verdict;
239 tgpar.target = t->u.target;
240 tgpar.targinfo = t->data;
241 verdict = t->u.target->target(skb, &tgpar);
243 if (verdict == EBT_ACCEPT) {
244 read_unlock_bh(&table->lock);
247 if (verdict == EBT_DROP) {
248 read_unlock_bh(&table->lock);
251 if (verdict == EBT_RETURN) {
253 #ifdef CONFIG_NETFILTER_DEBUG
/* RETURN at stack depth 0 means we're on a base chain: bogus ruleset */
255 BUGPRINT("RETURN on base chain");
256 /* act like this is EBT_CONTINUE */
261 /* put all the local variables right */
/* pop one frame from the per-cpu chain stack */
263 chaininfo = cs[sp].chaininfo;
264 nentries = chaininfo->nentries;
266 counter_base = cb_base +
267 chaininfo->counter_offset;
270 if (verdict == EBT_CONTINUE)
272 #ifdef CONFIG_NETFILTER_DEBUG
/* negative verdict that is none of the standard ones */
274 BUGPRINT("bogus standard verdict\n");
275 read_unlock_bh(&table->lock);
/* verdict >= 0: jump -- push the return position and enter the chain */
281 cs[sp].chaininfo = chaininfo;
282 cs[sp].e = ebt_next_entry(point);
284 chaininfo = (struct ebt_entries *) (base + verdict);
285 #ifdef CONFIG_NETFILTER_DEBUG
286 if (chaininfo->distinguisher) {
287 BUGPRINT("jump to non-chain\n");
288 read_unlock_bh(&table->lock);
292 nentries = chaininfo->nentries;
293 point = (struct ebt_entry *)chaininfo->data;
294 counter_base = cb_base + chaininfo->counter_offset;
298 point = ebt_next_entry(point);
302 /* I actually like this :) */
/* fell off the end of the chain: apply the chain policy */
303 if (chaininfo->policy == EBT_RETURN)
305 if (chaininfo->policy == EBT_ACCEPT) {
306 read_unlock_bh(&table->lock);
309 read_unlock_bh(&table->lock);
313 /* If it succeeds, returns element and locks mutex */
/* Search a list for a named element under the mutex; on success the
 * mutex stays held for the caller. */
315 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
/* anonymous struct describing the minimal list-node layout searched here */
319 struct list_head list;
320 char name[EBT_FUNCTION_MAXNAMELEN];
323 *error = mutex_lock_interruptible(mutex);
327 list_for_each_entry(e, head, list) {
328 if (strcmp(e->name, name) == 0)
/* Same as above but auto-loads the module "<prefix><name>" on a miss. */
337 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
338 int *error, struct mutex *mutex)
340 return try_then_request_module(
341 find_inlist_lock_noload(head, name, error, mutex),
342 "%s%s", prefix, name);
/* Look up an ebtables table by name (module prefix "ebtable_"),
 * returning it with ebt_mutex held. */
345 static inline struct ebt_table *
346 find_table_lock(struct net *net, const char *name, int *error,
349 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
350 "ebtable_", error, mutex);
/* Validate one match of an entry: size bounds against watchers_offset,
 * resolve the xt_match (auto-loading "ebt_<name>"), and run its
 * checkentry hook.  Module ref is dropped on failure. */
354 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
357 const struct ebt_entry *e = par->entryinfo;
358 struct xt_match *match;
359 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
/* reject matches whose declared size overruns the watcher area */
362 if (left < sizeof(struct ebt_entry_match) ||
363 left - sizeof(struct ebt_entry_match) < m->match_size)
366 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
367 m->u.name, 0), "ebt_%s", m->u.name);
369 return PTR_ERR(match);
375 par->matchinfo = m->data;
376 ret = xt_check_match(par, m->match_size,
377 e->ethproto, e->invflags & EBT_IPROTO);
379 module_put(match->me);
/* Validate one watcher: same scheme as ebt_check_match but against the
 * target_offset bound and via the xt_target interface. */
388 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
391 const struct ebt_entry *e = par->entryinfo;
392 struct xt_target *watcher;
393 size_t left = ((char *)e + e->target_offset) - (char *)w;
396 if (left < sizeof(struct ebt_entry_watcher) ||
397 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
400 watcher = try_then_request_module(
401 xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
402 "ebt_%s", w->u.name);
404 return PTR_ERR(watcher);
407 w->u.watcher = watcher;
409 par->target = watcher;
410 par->targinfo = w->data;
411 ret = xt_check_target(par, w->watcher_size,
412 e->ethproto, e->invflags & EBT_IPROTO);
414 module_put(watcher->me);
/*
 * First structural pass over the userspace blob: walk all entries within
 * entries_size, record the kernel-side addresses of the hook chain
 * headers, and reject malformed offsets.  NOTE(review): some bound-check
 * bodies (gotos/returns) are elided in this listing.
 */
422 static int ebt_verify_pointers(const struct ebt_replace *repl,
423 struct ebt_table_info *newinfo)
425 unsigned int limit = repl->entries_size;
426 unsigned int valid_hooks = repl->valid_hooks;
427 unsigned int offset = 0;
430 for (i = 0; i < NF_BR_NUMHOOKS; i++)
431 newinfo->hook_entry[i] = NULL;
433 newinfo->entries_size = repl->entries_size;
434 newinfo->nentries = repl->nentries;
436 while (offset < limit) {
437 size_t left = limit - offset;
438 struct ebt_entry *e = (void *)newinfo->entries + offset;
440 if (left < sizeof(unsigned int))
/* does this offset correspond to a userspace hook_entry pointer? */
443 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
444 if ((valid_hooks & (1 << i)) == 0)
446 if ((char __user *)repl->hook_entry[i] ==
447 repl->entries + offset)
/* chain header (struct ebt_entries) vs. ordinary entry is
 * distinguished by the EBT_ENTRY_OR_ENTRIES bit in bitmask */
451 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
452 if (e->bitmask != 0) {
453 /* we make userspace set this right,
454 so there is no misunderstanding */
455 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
456 "in distinguisher\n")
459 if (i != NF_BR_NUMHOOKS)
460 newinfo->hook_entry[i] = (struct ebt_entries *)e;
461 if (left < sizeof(struct ebt_entries))
463 offset += sizeof(struct ebt_entries);
465 if (left < sizeof(struct ebt_entry))
467 if (left < e->next_offset)
/* next_offset must at least cover the fixed entry header */
469 if (e->next_offset < sizeof(struct ebt_entry))
471 offset += e->next_offset;
474 if (offset != limit) {
475 BUGPRINT("entries_size too small\n");
479 /* check if all valid hooks have a chain */
480 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
481 if (!newinfo->hook_entry[i] &&
482 (valid_hooks & (1 << i))) {
483 BUGPRINT("Valid hook without chain\n");
491 * this one is very careful, as it is the first function
492 * to parse the userspace data
/*
 * Per-entry size/offset validation pass (EBT_ENTRY_ITERATE callback).
 * Tracks expected vs. counted entries per chain (*n / *cnt), the running
 * total (*totalcnt) and the number of user-defined chains (*udc_cnt).
 */
495 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496 const struct ebt_table_info *newinfo,
497 unsigned int *n, unsigned int *cnt,
498 unsigned int *totalcnt, unsigned int *udc_cnt)
502 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503 if ((void *)e == (void *)newinfo->hook_entry[i])
506 /* beginning of a new chain
507 if i == NF_BR_NUMHOOKS it must be a user defined chain */
508 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509 /* this checks if the previous chain has as many entries
512 BUGPRINT("nentries does not equal the nr of entries "
/* base chains may only have ACCEPT/DROP policy; udc only RETURN */
516 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518 /* only RETURN from udc */
519 if (i != NF_BR_NUMHOOKS ||
520 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521 BUGPRINT("bad policy\n");
525 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
527 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528 BUGPRINT("counter_offset != totalcnt");
531 *n = ((struct ebt_entries *)e)->nentries;
535 /* a plain old entry, heh */
/* the three sections (matches, watchers, target) must be in order
 * and the target must fit before next_offset */
536 if (sizeof(struct ebt_entry) > e->watchers_offset ||
537 e->watchers_offset > e->target_offset ||
538 e->target_offset >= e->next_offset) {
539 BUGPRINT("entry offsets not in right order\n");
542 /* this is not checked anywhere else */
543 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544 BUGPRINT("target size too small\n");
/* Bookkeeping record for chain-loop detection: a chainstack frame plus
 * the hook mask from which this user-defined chain is reachable. */
554 struct ebt_chainstack cs;
556 unsigned int hookmask;
560 * we need these positions to check that the jumps to a different part of the
561 * entries is a jump to the beginning of a new chain.
/* Collect the start position of each user-defined chain into udc[]
 * (EBT_ENTRY_ITERATE callback); *n counts the udc found so far. */
564 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565 unsigned int *n, struct ebt_cl_stack *udc)
569 /* we're only interested in chain starts */
572 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
576 /* only care about udc */
577 if (i != NF_BR_NUMHOOKS)
580 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581 /* these initialisations are depended on later in check_chainloops() */
583 udc[*n].hookmask = 0;
/*
 * Teardown helpers (EBT_*_ITERATE callbacks).  With a non-NULL counter,
 * iteration stops after *i successfully-checked items -- used to unwind
 * exactly the extensions that passed their check on an error path.
 */
/* Destroy one match extension and drop its module reference. */
590 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
592 struct xt_mtdtor_param par;
594 if (i && (*i)-- == 0)
598 par.match = m->u.match;
599 par.matchinfo = m->data;
600 par.family = NFPROTO_BRIDGE;
601 if (par.match->destroy != NULL)
602 par.match->destroy(&par);
603 module_put(par.match->me);
/* Destroy one watcher extension and drop its module reference. */
608 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
610 struct xt_tgdtor_param par;
612 if (i && (*i)-- == 0)
616 par.target = w->u.watcher;
617 par.targinfo = w->data;
618 par.family = NFPROTO_BRIDGE;
619 if (par.target->destroy != NULL)
620 par.target->destroy(&par);
621 module_put(par.target->me);
/* Destroy a whole entry: all watchers, all matches, then the target. */
626 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
628 struct xt_tgdtor_param par;
629 struct ebt_entry_target *t;
634 if (cnt && (*cnt)-- == 0)
636 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
637 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
638 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
641 par.target = t->u.target;
642 par.targinfo = t->data;
643 par.family = NFPROTO_BRIDGE;
644 if (par.target->destroy != NULL)
645 par.target->destroy(&par);
646 module_put(par.target->me);
/*
 * Full semantic check of one entry: flag sanity, hookmask computation
 * (which base chains can reach this rule, directly or via udc), then
 * checkentry for every match, watcher and the target.  On failure the
 * already-checked matches/watchers are unwound via the cleanup labels.
 * NOTE(review): several return statements and the '~0' sentinel bits are
 * elided in this listing -- confirm against the full source.
 */
651 ebt_check_entry(struct ebt_entry *e, struct net *net,
652 const struct ebt_table_info *newinfo,
653 const char *name, unsigned int *cnt,
654 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
656 struct ebt_entry_target *t;
657 struct xt_target *target;
658 unsigned int i, j, hook = 0, hookmask = 0;
661 struct xt_mtchk_param mtpar;
662 struct xt_tgchk_param tgpar;
664 /* don't mess with the struct ebt_entries */
668 if (e->bitmask & ~EBT_F_MASK) {
669 BUGPRINT("Unknown flag for bitmask\n");
672 if (e->invflags & ~EBT_INV_MASK) {
673 BUGPRINT("Unknown flag for inv bitmask\n");
676 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
677 BUGPRINT("NOPROTO & 802_3 not allowed\n");
680 /* what hook do we belong to? */
681 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
682 if (!newinfo->hook_entry[i])
684 if ((char *)newinfo->hook_entry[i] < (char *)e)
689 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
691 if (i < NF_BR_NUMHOOKS)
692 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
/* entry lives in a user-defined chain: inherit that chain's hookmask */
694 for (i = 0; i < udc_cnt; i++)
695 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
698 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
700 hookmask = cl_s[i - 1].hookmask;
704 mtpar.net = tgpar.net = net;
705 mtpar.table = tgpar.table = name;
706 mtpar.entryinfo = tgpar.entryinfo = e;
707 mtpar.hook_mask = tgpar.hook_mask = hookmask;
708 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
709 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
711 goto cleanup_matches;
713 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
715 goto cleanup_watchers;
716 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
/* room between target_offset and next_offset available to the target */
717 gap = e->next_offset - e->target_offset;
719 target = try_then_request_module(
720 xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
721 "ebt_%s", t->u.name);
722 if (IS_ERR(target)) {
723 ret = PTR_ERR(target);
724 goto cleanup_watchers;
725 } else if (target == NULL) {
727 goto cleanup_watchers;
730 t->u.target = target;
731 if (t->u.target == &ebt_standard_target) {
732 if (gap < sizeof(struct ebt_standard_target)) {
733 BUGPRINT("Standard target size too big\n");
735 goto cleanup_watchers;
/* verdict must be a known standard verdict or a jump offset */
737 if (((struct ebt_standard_target *)t)->verdict <
738 -NUM_STANDARD_TARGETS) {
739 BUGPRINT("Invalid standard target\n");
741 goto cleanup_watchers;
743 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
744 module_put(t->u.target->me);
746 goto cleanup_watchers;
749 tgpar.target = target;
750 tgpar.targinfo = t->data;
751 ret = xt_check_target(&tgpar, t->target_size,
752 e->ethproto, e->invflags & EBT_IPROTO);
754 module_put(target->me);
755 goto cleanup_watchers;
/* error unwind: destroy the j watchers / i matches checked so far */
760 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
762 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
767 * checks for loops and sets the hook mask for udc
768 * the hook mask for udc tells us from which base chains the udc can be
769 * accessed. This mask is a parameter to the check() functions of the extensions
/*
 * Iterative DFS from one base chain: follows standard-target jumps into
 * user-defined chains, using cl_s[] both as the explicit recursion stack
 * and as loop markers (cs.n != 0 means "currently on the path").
 */
771 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
772 unsigned int udc_cnt, unsigned int hooknr, char *base)
774 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
775 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
776 const struct ebt_entry_target *t;
778 while (pos < nentries || chain_nr != -1) {
779 /* end of udc, go back one 'recursion' step */
780 if (pos == nentries) {
781 /* put back values of the time when this chain was called */
782 e = cl_s[chain_nr].cs.e;
783 if (cl_s[chain_nr].from != -1)
785 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
787 nentries = chain->nentries;
788 pos = cl_s[chain_nr].cs.n;
789 /* make sure we won't see a loop that isn't one */
790 cl_s[chain_nr].cs.n = 0;
791 chain_nr = cl_s[chain_nr].from;
795 t = (struct ebt_entry_target *)
796 (((char *)e) + e->target_offset);
/* only standard targets can jump; skip everything else */
797 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
799 if (e->target_offset + sizeof(struct ebt_standard_target) >
801 BUGPRINT("Standard target size too big\n");
804 verdict = ((struct ebt_standard_target *)t)->verdict;
805 if (verdict >= 0) { /* jump to another chain */
806 struct ebt_entries *hlp2 =
807 (struct ebt_entries *)(base + verdict);
/* jump target must be the start of a known udc */
808 for (i = 0; i < udc_cnt; i++)
809 if (hlp2 == cl_s[i].cs.chaininfo)
811 /* bad destination or loop */
813 BUGPRINT("bad destination\n");
/* already reachable from this hook: no need to re-descend */
820 if (cl_s[i].hookmask & (1 << hooknr))
822 /* this can't be 0, so the loop test is correct */
823 cl_s[i].cs.n = pos + 1;
825 cl_s[i].cs.e = ebt_next_entry(e);
826 e = (struct ebt_entry *)(hlp2->data);
827 nentries = hlp2->nentries;
828 cl_s[i].from = chain_nr;
830 /* this udc is accessible from the base chain for hooknr */
831 cl_s[i].hookmask |= (1 << hooknr);
835 e = ebt_next_entry(e);
841 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
/*
 * Orchestrates full validation of a new table image: chain ordering,
 * entry-size pass, chainstack allocation, udc collection, loop detection
 * and finally the per-entry semantic check (with unwind on failure).
 * NOTE(review): several error returns and frees are elided in this listing.
 */
842 static int translate_table(struct net *net, const char *name,
843 struct ebt_table_info *newinfo)
845 unsigned int i, j, k, udc_cnt;
847 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
/* find the first valid hook; at least one chain is mandatory */
850 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
852 if (i == NF_BR_NUMHOOKS) {
853 BUGPRINT("No valid hooks specified\n");
856 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
857 BUGPRINT("Chains don't start at beginning\n");
860 /* make sure chains are ordered after each other in same order
861 as their corresponding hooks */
862 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
863 if (!newinfo->hook_entry[j])
865 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
866 BUGPRINT("Hook order must be followed\n");
872 /* do some early checkings and initialize some things */
873 i = 0; /* holds the expected nr. of entries for the chain */
874 j = 0; /* holds the up to now counted entries for the chain */
875 k = 0; /* holds the total nr. of entries, should equal
876 newinfo->nentries afterwards */
877 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
878 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
879 ebt_check_entry_size_and_hooks, newinfo,
880 &i, &j, &k, &udc_cnt);
886 BUGPRINT("nentries does not equal the nr of entries in the "
890 if (k != newinfo->nentries) {
891 BUGPRINT("Total nentries is wrong\n");
895 /* get the location of the udc, put them in an array
896 while we're at it, allocate the chainstack */
898 /* this will get free'd in do_replace()/ebt_register_table()
899 if an error occurs */
900 newinfo->chainstack =
901 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)))
902 if (!newinfo->chainstack)
904 for_each_possible_cpu(i) {
905 newinfo->chainstack[i] =
906 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
907 if (!newinfo->chainstack[i]) {
/* unwind the per-cpu stacks allocated so far */
909 vfree(newinfo->chainstack[--i]);
910 vfree(newinfo->chainstack);
911 newinfo->chainstack = NULL;
916 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
919 i = 0; /* the i'th udc */
920 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
921 ebt_get_udc_positions, newinfo, &i, cl_s);
924 BUGPRINT("i != udc_cnt\n");
930 /* Check for loops */
931 for (i = 0; i < NF_BR_NUMHOOKS; i++)
932 if (newinfo->hook_entry[i])
933 if (check_chainloops(newinfo->hook_entry[i],
934 cl_s, udc_cnt, i, newinfo->entries)) {
939 /* we now know the following (along with E=mc²):
940 - the nr of entries in each chain is right
941 - the size of the allocated space is right
942 - all valid hooks have a corresponding chain
944 - wrong data can still be on the level of a single entry
945 - could be there are jumps to places that are not the
946 beginning of a chain. This can only occur in chains that
947 are not accessible from any base chains, so we don't care. */
949 /* used to know what we need to clean up if something goes wrong */
951 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
952 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
/* on failure, destroy only the i entries that passed their check */
954 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
955 ebt_cleanup_entry, net, &i);
961 /* called under write_lock */
/* Fold the per-cpu counter sets into a single array: start from cpu 0's
 * set, then add every other possible cpu's packet/byte counts. */
962 static void get_counters(const struct ebt_counter *oldcounters,
963 struct ebt_counter *counters, unsigned int nentries)
966 struct ebt_counter *counter_base;
968 /* counters of cpu 0 */
969 memcpy(counters, oldcounters,
970 sizeof(struct ebt_counter) * nentries);
972 /* add other counters to those of cpu 0 */
973 for_each_possible_cpu(cpu) {
/* cpu 0 was the memcpy seed; presumably skipped here (line elided) */
976 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
977 for (i = 0; i < nentries; i++) {
978 counters[i].pcnt += counter_base[i].pcnt;
979 counters[i].bcnt += counter_base[i].bcnt;
/*
 * Second half of table replacement: verify+translate the new image, swap
 * it in under the table write lock, copy the old counters to userspace,
 * then tear down the old table outside the lock.  Module refcount tracks
 * whether the table holds any rules.  NOTE(review): error labels and some
 * returns are elided in this listing.
 */
984 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
985 struct ebt_table_info *newinfo)
988 struct ebt_counter *counterstmp = NULL;
989 /* used to be able to unlock earlier */
990 struct ebt_table_info *table;
993 /* the user wants counters back
994 the check on the size is done later, when we have the lock */
995 if (repl->num_counters) {
996 unsigned long size = repl->num_counters * sizeof(*counterstmp);
997 counterstmp = vmalloc(size);
1002 newinfo->chainstack = NULL;
1003 ret = ebt_verify_pointers(repl, newinfo);
1005 goto free_counterstmp;
1007 ret = translate_table(net, repl->name, newinfo);
1010 goto free_counterstmp;
1012 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1018 /* the table doesn't like it */
1019 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1022 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1023 BUGPRINT("Wrong nr. of counters requested\n");
1028 /* we have the mutex lock, so no danger in reading this pointer */
1030 /* make sure the table can only be rmmod'ed if it contains no rules */
1031 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1034 } else if (table->nentries && !newinfo->nentries)
1036 /* we need an atomic snapshot of the counters */
1037 write_lock_bh(&t->lock);
1038 if (repl->num_counters)
1039 get_counters(t->private->counters, counterstmp,
1040 t->private->nentries);
/* the actual swap: packet path sees newinfo from here on */
1042 t->private = newinfo;
1043 write_unlock_bh(&t->lock);
1044 mutex_unlock(&ebt_mutex);
1045 /* so, a user can change the chains while having messed up her counter
1046 allocation. Only reason why this is done is because this way the lock
1047 is held only once, while this doesn't bring the kernel into a
1049 if (repl->num_counters &&
1050 copy_to_user(repl->counters, counterstmp,
1051 repl->num_counters * sizeof(struct ebt_counter))) {
1057 /* decrease module count and free resources */
1058 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1059 ebt_cleanup_entry, net, NULL);
1061 vfree(table->entries);
1062 if (table->chainstack) {
1063 for_each_possible_cpu(i)
1064 vfree(table->chainstack[i]);
1065 vfree(table->chainstack);
/* error path: undo translate_table's work on the rejected newinfo */
1073 mutex_unlock(&ebt_mutex);
1075 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1076 ebt_cleanup_entry, net, NULL);
1079 /* can be initialized in translate_table() */
1080 if (newinfo->chainstack) {
1081 for_each_possible_cpu(i)
1082 vfree(newinfo->chainstack[i]);
1083 vfree(newinfo->chainstack);
1088 /* replace the table */
/*
 * EBT_SO_SET_ENTRIES entry point: copy and sanity-check the userspace
 * ebt_replace header and entry blob, then hand off to do_replace_finish.
 */
1089 static int do_replace(struct net *net, const void __user *user,
1092 int ret, countersize;
1093 struct ebt_table_info *newinfo;
1094 struct ebt_replace tmp;
1096 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1099 if (len != sizeof(tmp) + tmp.entries_size) {
1100 BUGPRINT("Wrong len argument\n");
1104 if (tmp.entries_size == 0) {
1105 BUGPRINT("Entries_size never zero\n");
1108 /* overflow check */
/* keep countersize / counter-copy arithmetic below from overflowing */
1109 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1110 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1112 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1115 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1116 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1121 memset(newinfo->counters, 0, countersize);
1123 newinfo->entries = vmalloc(tmp.entries_size);
1124 if (!newinfo->entries) {
/* copy_from_user call start is elided in this listing */
1129 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1130 BUGPRINT("Couldn't copy entries from userspace\n");
1135 ret = do_replace_finish(net, &tmp, newinfo);
1139 vfree(newinfo->entries);
/*
 * Register a kernel-provided table (called by ebtable_filter/nat/broute):
 * duplicate the table struct and its initial entries, translate them, and
 * link the table into the per-net list under ebt_mutex.
 * NOTE(review): some gotos/returns and the entries-overlap checks are
 * elided in this listing -- confirm against the full source.
 */
1146 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1148 struct ebt_table_info *newinfo;
1149 struct ebt_table *t, *table;
1150 struct ebt_replace_kernel *repl;
1151 int ret, i, countersize;
1154 if (input_table == NULL || (repl = input_table->table) == NULL ||
1155 repl->entries == 0 || repl->entries_size == 0 ||
1156 repl->counters != NULL || input_table->private != NULL) {
1157 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1158 return ERR_PTR(-EINVAL);
1161 /* Don't add one table to multiple lists. */
1162 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1168 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1169 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1174 p = vmalloc(repl->entries_size);
1178 memcpy(p, repl->entries, repl->entries_size);
1179 newinfo->entries = p;
1181 newinfo->entries_size = repl->entries_size;
1182 newinfo->nentries = repl->nentries;
1185 memset(newinfo->counters, 0, countersize);
1187 /* fill in newinfo and parse the entries */
1188 newinfo->chainstack = NULL;
1189 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1190 if ((repl->valid_hooks & (1 << i)) == 0)
1191 newinfo->hook_entry[i] = NULL;
/* rebase the hook pointer from the template onto our copy */
1193 newinfo->hook_entry[i] = p +
1194 ((char *)repl->hook_entry[i] - repl->entries);
1196 ret = translate_table(net, repl->name, newinfo);
1198 BUGPRINT("Translate_table failed\n");
1199 goto free_chainstack;
1202 if (table->check && table->check(newinfo, table->valid_hooks)) {
1203 BUGPRINT("The table doesn't like its own initial data, lol\n");
/* NOTE(review): returning here without freeing newinfo/table looks like
 * a leak in this revision -- verify against upstream history */
1204 return ERR_PTR(-EINVAL);
1207 table->private = newinfo;
1208 rwlock_init(&table->lock);
1209 ret = mutex_lock_interruptible(&ebt_mutex);
1211 goto free_chainstack;
1213 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1214 if (strcmp(t->name, table->name) == 0) {
1216 BUGPRINT("Table name already exists\n");
1221 /* Hold a reference count if the chains aren't empty */
1222 if (newinfo->nentries && !try_module_get(table->me)) {
1226 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1227 mutex_unlock(&ebt_mutex);
/* error unwind labels */
1230 mutex_unlock(&ebt_mutex);
1232 if (newinfo->chainstack) {
1233 for_each_possible_cpu(i)
1234 vfree(newinfo->chainstack[i]);
1235 vfree(newinfo->chainstack);
1237 vfree(newinfo->entries);
1243 return ERR_PTR(ret);
/* Unregister a table: unlink under ebt_mutex, then destroy all entries,
 * drop the rule-held module ref and free all allocations. */
1246 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1251 BUGPRINT("Request to unregister NULL table!!!\n");
1254 mutex_lock(&ebt_mutex);
1255 list_del(&table->list);
1256 mutex_unlock(&ebt_mutex);
1257 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1258 ebt_cleanup_entry, net, NULL);
1259 if (table->private->nentries)
1260 module_put(table->me);
1261 vfree(table->private->entries);
1262 if (table->private->chainstack) {
1263 for_each_possible_cpu(i)
1264 vfree(table->private->chainstack[i]);
1265 vfree(table->private->chainstack);
1267 vfree(table->private);
1271 /* userspace just supplied us with counters */
/*
 * EBT_SO_SET_COUNTERS backend: copy a counter array from userspace and
 * add it atomically (under the table write lock) to cpu 0's counters.
 */
1272 static int do_update_counters(struct net *net, const char *name,
1273 struct ebt_counter __user *counters,
1274 unsigned int num_counters,
1275 const void __user *user, unsigned int len)
1278 struct ebt_counter *tmp;
1279 struct ebt_table *t;
1281 if (num_counters == 0)
1284 tmp = vmalloc(num_counters * sizeof(*tmp));
1288 t = find_table_lock(net, name, &ret, &ebt_mutex);
1292 if (num_counters != t->private->nentries) {
1293 BUGPRINT("Wrong nr of counters\n");
1298 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1303 /* we want an atomic add of the counters */
1304 write_lock_bh(&t->lock);
1306 /* we add to the counters of the first cpu */
1307 for (i = 0; i < num_counters; i++) {
1308 t->private->counters[i].pcnt += tmp[i].pcnt;
1309 t->private->counters[i].bcnt += tmp[i].bcnt;
1312 write_unlock_bh(&t->lock);
1315 mutex_unlock(&ebt_mutex);
/* Thin wrapper: validate the ebt_replace header length, then delegate. */
1321 static int update_counters(struct net *net, const void __user *user,
1324 struct ebt_replace hlp;
1326 if (copy_from_user(&hlp, user, sizeof(hlp)))
1329 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1332 return do_update_counters(net, hlp.name, hlp.counters,
1333 hlp.num_counters, user, len);
1336 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1337 const char *base, char __user *ubase)
1339 char __user *hlp = ubase + ((char *)m - base);
1340 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1345 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1346 const char *base, char __user *ubase)
1348 char __user *hlp = ubase + ((char *)w - base);
1349 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1355 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1359 const struct ebt_entry_target *t;
1361 if (e->bitmask == 0)
1364 hlp = ubase + (((char *)e + e->target_offset) - base);
1365 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1367 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1370 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1373 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
/* Snapshot the per-cpu counters (summed by get_counters()) into a
 * temporary vmalloc buffer under t->lock, then copy that snapshot to the
 * user-supplied counter array.  num_counters must be 0 (caller does not
 * want counters) or exactly nentries.
 */
1378 static int copy_counters_to_user(struct ebt_table *t,
1379 const struct ebt_counter *oldcounters,
1380 void __user *user, unsigned int num_counters,
1381 unsigned int nentries)
1383 struct ebt_counter *counterstmp;
1386 /* userspace might not need the counters */
1387 if (num_counters == 0)
1390 if (num_counters != nentries) {
1391 BUGPRINT("Num_counters wrong\n");
1395 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
/* write lock keeps packet-path readers out while we sum the cpus */
1399 write_lock_bh(&t->lock);
1400 get_counters(oldcounters, counterstmp, nentries);
1401 write_unlock_bh(&t->lock);
1403 if (copy_to_user(user, counterstmp,
1404 nentries * sizeof(struct ebt_counter)))
1410 /* called with ebt_mutex locked */
/* GET_ENTRIES copies the live ruleset (t->private); GET_INIT_ENTRIES
 * copies the registration-time template (t->table).  After copying the
 * raw entry blob, ebt_make_names() rewrites the kernel pointers in the
 * user image back into extension names.
 */
1411 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1412 const int *len, int cmd)
1414 struct ebt_replace tmp;
1415 const struct ebt_counter *oldcounters;
1416 unsigned int entries_size, nentries;
1420 if (cmd == EBT_SO_GET_ENTRIES) {
1421 entries_size = t->private->entries_size;
1422 nentries = t->private->nentries;
1423 entries = t->private->entries;
1424 oldcounters = t->private->counters;
1426 entries_size = t->table->entries_size;
1427 nentries = t->table->nentries;
1428 entries = t->table->entries;
1429 oldcounters = t->table->counters;
1432 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* *len must match header + entries + (optional) counter array exactly */
1435 if (*len != sizeof(struct ebt_replace) + entries_size +
1436 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1439 if (tmp.nentries != nentries) {
1440 BUGPRINT("Nentries wrong\n");
1444 if (tmp.entries_size != entries_size) {
1445 BUGPRINT("Wrong size\n");
1449 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1450 tmp.num_counters, nentries);
1454 if (copy_to_user(tmp.entries, entries, entries_size)) {
1455 BUGPRINT("Couldn't copy entries to userspace\n");
1458 /* set the match/watcher/target names right */
1459 return EBT_ENTRY_ITERATE(entries, entries_size,
1460 ebt_make_names, entries, tmp.entries);
/* setsockopt() entry point: dispatch EBT_SO_SET_* commands.
 * Requires CAP_NET_ADMIN.
 */
1463 static int do_ebt_set_ctl(struct sock *sk,
1464 int cmd, void __user *user, unsigned int len)
1468 if (!capable(CAP_NET_ADMIN))
1472 case EBT_SO_SET_ENTRIES:
1473 ret = do_replace(sock_net(sk), user, len);
1475 case EBT_SO_SET_COUNTERS:
1476 ret = update_counters(sock_net(sk), user, len);
/* getsockopt() entry point: dispatch EBT_SO_GET_* commands.
 * find_table_lock() returns with ebt_mutex held; every branch below
 * drops it before returning.  Requires CAP_NET_ADMIN.
 */
1484 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1487 struct ebt_replace tmp;
1488 struct ebt_table *t;
1490 if (!capable(CAP_NET_ADMIN))
1493 if (copy_from_user(&tmp, user, sizeof(tmp)))
1496 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1501 case EBT_SO_GET_INFO:
1502 case EBT_SO_GET_INIT_INFO:
1503 if (*len != sizeof(struct ebt_replace)){
1505 mutex_unlock(&ebt_mutex);
/* GET_INFO reports the live table, GET_INIT_INFO the template */
1508 if (cmd == EBT_SO_GET_INFO) {
1509 tmp.nentries = t->private->nentries;
1510 tmp.entries_size = t->private->entries_size;
1511 tmp.valid_hooks = t->valid_hooks;
1513 tmp.nentries = t->table->nentries;
1514 tmp.entries_size = t->table->entries_size;
1515 tmp.valid_hooks = t->table->valid_hooks;
1517 mutex_unlock(&ebt_mutex);
1518 if (copy_to_user(user, &tmp, *len) != 0){
1519 BUGPRINT("c2u Didn't work\n");
1526 case EBT_SO_GET_ENTRIES:
1527 case EBT_SO_GET_INIT_ENTRIES:
1528 ret = copy_everything_to_user(t, user, len, cmd);
1529 mutex_unlock(&ebt_mutex);
1533 mutex_unlock(&ebt_mutex);
1540 #ifdef CONFIG_COMPAT
1541 /* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: same field order, but all user
 * pointers are compat_uptr_t (32 bit) instead of native pointers.
 */
1542 struct compat_ebt_replace {
1543 char name[EBT_TABLE_MAXNAMELEN];
1544 compat_uint_t valid_hooks;
1545 compat_uint_t nentries;
1546 compat_uint_t entries_size;
1547 /* start of the chains */
1548 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1549 /* nr of counters userspace expects back */
1550 compat_uint_t num_counters;
1551 /* where the kernel will put the old counters. */
1552 compat_uptr_t counters;
1553 compat_uptr_t entries;
1556 /* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit on-the-wire form of a match/watcher/target: name (userspace
 * never sends kernel pointers), its size, then the extension payload.
 */
1557 struct compat_ebt_entry_mwt {
1559 char name[EBT_FUNCTION_MAXNAMELEN];
1562 compat_uint_t match_size;
/* flexible payload; [0] is the pre-C99 kernel idiom for trailing data */
1563 compat_uint_t data[0];
1566 /* account for possible padding between match_size and ->data */
/* Size difference between the native ebt_entry_match header and the
 * compat header; the BUILD_BUG_ON guarantees it is never negative.
 */
1567 static int ebt_compat_entry_padsize(void)
1569 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1570 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1571 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1572 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/* Payload-size delta between the 32-bit and native form of a match. */
1575 static int ebt_compat_match_offset(const struct xt_match *match,
1576 unsigned int userlen)
1579 * ebt_among needs special handling. The kernel .matchsize is
1580 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1581 * value is expected.
1582 * Example: userspace sends 4500, ebt_among.c wants 4504.
1584 if (unlikely(match->matchsize == -1))
1585 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1586 return xt_compat_match_offset(match);
/* Copy one kernel match into the 32-bit userspace image at *dstptr:
 * name + shrunken match_size + payload (via the extension's own
 * compat_to_user hook when it has one).  *size is reduced by the space
 * saved in the compat layout.
 */
1589 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1592 const struct xt_match *match = m->u.match;
1593 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1594 int off = ebt_compat_match_offset(match, m->match_size);
/* msize = the size the 32-bit side expects for this payload */
1595 compat_uint_t msize = m->match_size - off;
1597 BUG_ON(off >= m->match_size);
1599 if (copy_to_user(cm->u.name, match->name,
1600 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1603 if (match->compat_to_user) {
1604 if (match->compat_to_user(cm->data, m->data))
1606 } else if (copy_to_user(cm->data, m->data, msize))
1609 *size -= ebt_compat_entry_padsize() + off;
/* Target counterpart of compat_match_to_user(): copy one kernel target
 * into the 32-bit image, shrinking its size by the compat offset.
 * Also used for watchers via compat_watcher_to_user().
 */
1615 static int compat_target_to_user(struct ebt_entry_target *t,
1616 void __user **dstptr,
1619 const struct xt_target *target = t->u.target;
1620 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1621 int off = xt_compat_target_offset(target);
1622 compat_uint_t tsize = t->target_size - off;
1624 BUG_ON(off >= t->target_size);
1626 if (copy_to_user(cm->u.name, target->name,
1627 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1630 if (target->compat_to_user) {
1631 if (target->compat_to_user(cm->data, t->data))
1633 } else if (copy_to_user(cm->data, t->data, tsize))
1636 *size -= ebt_compat_entry_padsize() + off;
/* Watchers share ebt_entry_target's layout, so reuse the target path. */
1642 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1643 void __user **dstptr,
1646 return compat_target_to_user((struct ebt_entry_target *)w,
/* Copy one ebt_entry (or ebt_entries chain header) into the 32-bit
 * userspace image.  Because the compat layout is smaller, the entry's
 * watchers/target/next offsets must be recomputed from how much *size
 * shrank while copying matches/watchers, then patched into the user copy.
 */
1650 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1653 struct ebt_entry_target *t;
1654 struct ebt_entry __user *ce;
1655 u32 watchers_offset, target_offset, next_offset;
1656 compat_uint_t origsize;
/* bitmask == 0: chain header, copy it verbatim and return early */
1659 if (e->bitmask == 0) {
1660 if (*size < sizeof(struct ebt_entries))
1662 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1665 *dstptr += sizeof(struct ebt_entries);
1666 *size -= sizeof(struct ebt_entries);
1670 if (*size < sizeof(*ce))
1673 ce = (struct ebt_entry __user *)*dstptr;
1674 if (copy_to_user(ce, e, sizeof(*ce)))
1678 *dstptr += sizeof(*ce);
1680 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
/* (origsize - *size) = bytes the compat form saved so far */
1683 watchers_offset = e->watchers_offset - (origsize - *size);
1685 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1688 target_offset = e->target_offset - (origsize - *size);
1690 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1692 ret = compat_target_to_user(t, dstptr, size);
1695 next_offset = e->next_offset - (origsize - *size);
1697 if (put_user(watchers_offset, &ce->watchers_offset) ||
1698 put_user(target_offset, &ce->target_offset) ||
1699 put_user(next_offset, &ce->next_offset))
1702 *size -= sizeof(*ce);
/* Accumulate into *off how many bytes this match shrinks by in compat form. */
1706 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1708 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1709 *off += ebt_compat_entry_padsize();
/* Same accumulation for a watcher (target-layout extension). */
1713 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1715 *off += xt_compat_target_offset(w->u.watcher);
1716 *off += ebt_compat_entry_padsize();
/* For one entry: compute the total compat size delta 'off', record it in
 * the xt compat offset table (for later jump calculation), shrink
 * newinfo->entries_size, and pull every hook_entry offset that lies
 * beyond this entry back by 'off'.
 */
1720 static int compat_calc_entry(const struct ebt_entry *e,
1721 const struct ebt_table_info *info,
1723 struct compat_ebt_replace *newinfo)
1725 const struct ebt_entry_target *t;
1726 unsigned int entry_offset;
1729 if (e->bitmask == 0)
1733 entry_offset = (void *)e - base;
1735 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1736 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1738 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1740 off += xt_compat_target_offset(t->u.target);
1741 off += ebt_compat_entry_padsize();
1743 newinfo->entries_size -= off;
1745 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1749 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1750 const void *hookptr = info->hook_entry[i];
/* NOTE(review): (base - hookptr) pointer arithmetic looks inverted at a
 * glance but matches the truncated context here — confirm against the
 * full source before touching. */
1751 if (info->hook_entry[i] &&
1752 (e < (struct ebt_entry *)(base - hookptr))) {
1753 newinfo->hook_entry[i] -= off;
1754 pr_debug("0x%08X -> 0x%08X\n",
1755 newinfo->hook_entry[i] + off,
1756 newinfo->hook_entry[i]);
/* Fill *newinfo with the compat (32-bit) view of a table: start from the
 * native entries_size and let compat_calc_entry() shrink it per entry.
 */
1764 static int compat_table_info(const struct ebt_table_info *info,
1765 struct compat_ebt_replace *newinfo)
1767 unsigned int size = info->entries_size;
1768 const void *entries = info->entries;
1770 newinfo->entries_size = size;
1772 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/* Compat counterpart of copy_everything_to_user(): validate the 32-bit
 * ebt_replace header, recompute the compat entries_size, copy counters,
 * then stream each entry through compat_copy_entry_to_user().
 */
1776 static int compat_copy_everything_to_user(struct ebt_table *t,
1777 void __user *user, int *len, int cmd)
1779 struct compat_ebt_replace repl, tmp;
1780 struct ebt_counter *oldcounters;
1781 struct ebt_table_info tinfo;
1785 memset(&tinfo, 0, sizeof(tinfo));
1787 if (cmd == EBT_SO_GET_ENTRIES) {
1788 tinfo.entries_size = t->private->entries_size;
1789 tinfo.nentries = t->private->nentries;
1790 tinfo.entries = t->private->entries;
1791 oldcounters = t->private->counters;
1793 tinfo.entries_size = t->table->entries_size;
1794 tinfo.nentries = t->table->nentries;
1795 tinfo.entries = t->table->entries;
1796 oldcounters = t->table->counters;
1799 if (copy_from_user(&tmp, user, sizeof(tmp)))
1802 if (tmp.nentries != tinfo.nentries ||
1803 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
/* repl gets the compat-adjusted entries_size via compat_table_info() */
1806 memcpy(&repl, &tmp, sizeof(repl));
1807 if (cmd == EBT_SO_GET_ENTRIES)
1808 ret = compat_table_info(t->private, &repl);
1810 ret = compat_table_info(&tinfo, &repl);
1814 if (*len != sizeof(tmp) + repl.entries_size +
1815 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1816 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1817 *len, tinfo.entries_size, repl.entries_size);
1821 /* userspace might not need the counters */
1822 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1823 tmp.num_counters, tinfo.nentries);
1827 pos = compat_ptr(tmp.entries);
1828 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1829 compat_copy_entry_to_user, &pos, &tmp.entries_size);
/* State for the two-pass 32->64 bit entry translation: pass 1 runs with
 * buf_kern_start == NULL and only sizes the needed kernel buffer; pass 2
 * copies translated data into the allocated buffer.
 */
1832 struct ebt_entries_buf_state {
1833 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1834 u32 buf_kern_len; /* total size of kernel buffer */
1835 u32 buf_kern_offset; /* amount of data copied so far */
1836 u32 buf_user_offset; /* read position in userspace buffer */
/* Advance the kernel-side write offset by sz; the >= sz comparison
 * detects u32 wrap-around and reports it as -EINVAL.
 */
1839 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1841 state->buf_kern_offset += sz;
1842 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/* Append sz bytes of data to the translation buffer.  In the sizing pass
 * (buf_kern_start == NULL) nothing is copied; only the offsets advance.
 */
1845 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1846 void *data, unsigned int sz)
1848 if (state->buf_kern_start == NULL)
1851 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1853 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
/* user and kernel read/write positions advance in lockstep here */
1856 state->buf_user_offset += sz;
1857 return ebt_buf_count(state, sz);
/* Append sz zero bytes of kernel-only padding (alignment difference
 * between compat and native layouts); the user read position is NOT
 * advanced because userspace never supplied these bytes.
 */
1860 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1862 char *b = state->buf_kern_start;
1864 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1866 if (b != NULL && sz > 0)
1867 memset(b + state->buf_kern_offset, 0, sz);
1868 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1869 return ebt_buf_count(state, sz);
/* Translate one 32-bit match/watcher/target payload into native form.
 * Looks up (and possibly modprobes) the extension by name, copies or
 * compat_from_user()-converts the payload, records the size delta in the
 * xt compat offset table, and zero-pads up to XT_ALIGN of the kernel size.
 * Returns the number of kernel-side payload bytes consumed (off +
 * match_size) on success.
 * NOTE(review): several error paths / braces are elided in this chunk.
 */
1878 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1879 enum compat_mwt compat_mwt,
1880 struct ebt_entries_buf_state *state,
1881 const unsigned char *base)
1883 char name[EBT_FUNCTION_MAXNAMELEN];
1884 struct xt_match *match;
1885 struct xt_target *wt;
1887 int off, pad = 0, ret = 0;
1888 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1890 strlcpy(name, mwt->u.name, sizeof(name));
/* dst is NULL during the sizing pass; copies below are skipped then */
1892 if (state->buf_kern_start)
1893 dst = state->buf_kern_start + state->buf_kern_offset;
1895 entry_offset = (unsigned char *) mwt - base;
1896 switch (compat_mwt) {
1897 case EBT_COMPAT_MATCH:
1898 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1899 name, 0), "ebt_%s", name);
1903 return PTR_ERR(match);
1905 off = ebt_compat_match_offset(match, match_size);
1907 if (match->compat_from_user)
1908 match->compat_from_user(dst, mwt->data);
1910 memcpy(dst, mwt->data, match_size);
/* matchsize == -1 means "variable size" (ebt_among); trust user size */
1913 size_kern = match->matchsize;
1914 if (unlikely(size_kern == -1))
1915 size_kern = match_size;
1916 module_put(match->me);
1918 case EBT_COMPAT_WATCHER: /* fallthrough */
1919 case EBT_COMPAT_TARGET:
1920 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1921 name, 0), "ebt_%s", name);
1926 off = xt_compat_target_offset(wt);
1929 if (wt->compat_from_user)
1930 wt->compat_from_user(dst, mwt->data);
1932 memcpy(dst, mwt->data, match_size);
1935 size_kern = wt->targetsize;
/* remember this entry's growth so jumps can be fixed up later */
1941 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1942 off + ebt_compat_entry_padsize());
1947 state->buf_kern_offset += match_size + off;
1948 state->buf_user_offset += match_size;
1949 pad = XT_ALIGN(size_kern) - size_kern;
1951 if (pad > 0 && dst) {
1952 BUG_ON(state->buf_kern_len <= pad);
1953 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1954 memset(dst + size_kern, 0, pad);
1956 return off + match_size;
1960 * return size of all matches, watchers or target, including necessary
1961 * alignment and padding.
/* Walk a run of compat_ebt_entry_mwt structures (all matches, all
 * watchers, or the single target of one entry), translating each via
 * compat_mtw_from_user() and tracking how much the native form grows.
 */
1963 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1964 unsigned int size_left, enum compat_mwt type,
1965 struct ebt_entries_buf_state *state, const void *base)
1973 buf = (char *) match32;
1975 while (size_left >= sizeof(*match32)) {
1976 struct ebt_entry_match *match_kern;
1979 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1982 tmp = state->buf_kern_start + state->buf_kern_offset;
1983 match_kern = (struct ebt_entry_match *) tmp;
/* copy the compat header (name + size) into the kernel buffer */
1985 ret = ebt_buf_add(state, buf, sizeof(*match32));
1988 size_left -= sizeof(*match32);
1990 /* add padding before match->data (if any) */
1991 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1995 if (match32->match_size > size_left)
1998 size_left -= match32->match_size;
2000 ret = compat_mtw_from_user(match32, type, state, base);
2004 BUG_ON(ret < match32->match_size);
2005 growth += ret - match32->match_size;
2006 growth += ebt_compat_entry_padsize();
2008 buf += sizeof(*match32);
2009 buf += match32->match_size;
/* second pass only: patch the translated header's size field */
2012 match_kern->match_size = ret;
2014 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2015 match32 = (struct compat_ebt_entry_mwt *) buf;
/* Iterate fn over every compat watcher of entry e, from watchers_offset
 * up to target_offset; fails if the walk does not land exactly on
 * target_offset (malformed sizes).  Comments kept outside the macro body
 * to preserve the line continuations.
 */
2021 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2025 struct compat_ebt_entry_mwt *__watcher; \
2027 for (__i = e->watchers_offset; \
2028 __i < (e)->target_offset; \
2029 __i += __watcher->watcher_size + \
2030 sizeof(struct compat_ebt_entry_mwt)) { \
2031 __watcher = (void *)(e) + __i; \
2032 __ret = fn(__watcher , ## args); \
2037 if (__i != (e)->target_offset) \
/* Iterate fn over every compat match of entry e, from just past the
 * ebt_entry header up to watchers_offset; fails if the walk does not
 * land exactly on watchers_offset.
 */
2043 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2047 struct compat_ebt_entry_mwt *__match; \
2049 for (__i = sizeof(struct ebt_entry); \
2050 __i < (e)->watchers_offset; \
2051 __i += __match->match_size + \
2052 sizeof(struct compat_ebt_entry_mwt)) { \
2053 __match = (void *)(e) + __i; \
2054 __ret = fn(__match , ## args); \
2059 if (__i != (e)->watchers_offset) \
2065 /* called for all ebt_entry structures. */
/* Translate one compat ebt_entry: copy the invariant head, then size or
 * copy its matches, watchers and target via ebt_size_mwt(), and (second
 * pass) rewrite the four offsets to account for the layout growth.
 * NOTE(review): this chunk is missing several lines (braces, some error
 * paths) — comments only, code untouched.
 */
2066 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2067 unsigned int *total,
2068 struct ebt_entries_buf_state *state)
2070 unsigned int i, j, startoff, new_offset = 0;
2071 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2072 unsigned int offsets[4];
2073 unsigned int *offsets_update = NULL;
2077 if (*total < sizeof(struct ebt_entries))
/* bitmask == 0: chain header, copied verbatim */
2080 if (!entry->bitmask) {
2081 *total -= sizeof(struct ebt_entries);
2082 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2084 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2087 startoff = state->buf_user_offset;
2088 /* pull in most part of ebt_entry, it does not need to be changed. */
2089 ret = ebt_buf_add(state, entry,
2090 offsetof(struct ebt_entry, watchers_offset));
2094 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2095 memcpy(&offsets[1], &entry->watchers_offset,
2096 sizeof(offsets) - sizeof(offsets[0]));
2098 if (state->buf_kern_start) {
2099 buf_start = state->buf_kern_start + state->buf_kern_offset;
2100 offsets_update = (unsigned int *) buf_start;
2102 ret = ebt_buf_add(state, &offsets[1],
2103 sizeof(offsets) - sizeof(offsets[0]));
2106 buf_start = (char *) entry;
2108 * 0: matches offset, always follows ebt_entry.
2109 * 1: watchers offset, from ebt_entry structure
2110 * 2: target offset, from ebt_entry structure
2111 * 3: next ebt_entry offset, from ebt_entry structure
2113 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2115 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2116 struct compat_ebt_entry_mwt *match32;
2118 char *buf = buf_start;
2120 buf = buf_start + offsets[i];
2121 if (offsets[i] > offsets[j])
2124 match32 = (struct compat_ebt_entry_mwt *) buf;
2125 size = offsets[j] - offsets[i];
/* i doubles as the enum compat_mwt value (match/watcher/target) */
2126 ret = ebt_size_mwt(match32, size, i, state, base);
2130 if (offsets_update && new_offset) {
2131 pr_debug("ebtables: change offset %d to %d\n",
2132 offsets_update[i], offsets[j] + new_offset);
2133 offsets_update[i] = offsets[j] + new_offset;
2137 startoff = state->buf_user_offset - startoff;
2139 BUG_ON(*total < startoff);
2145 * repl->entries_size is the size of the ebt_entry blob in userspace.
2146 * It might need more memory when copied to a 64 bit kernel in case
2147 * userspace is 32-bit. So, first task: find out how much memory is needed.
2149 * Called before validation is performed.
/* Drive size_entry_mwt() over the whole blob; returns the kernel-side
 * size (buf_kern_offset) consumed by the translated entries.
 */
2151 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2152 struct ebt_entries_buf_state *state)
2154 unsigned int size_remaining = size_user;
2157 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2158 &size_remaining, state);
/* a non-zero remainder means the user blob had trailing garbage */
2162 WARN_ON(size_remaining);
2163 return state->buf_kern_offset;
/* Copy and widen a 32-bit ebt_replace header into the native *repl,
 * converting compat_uptr_t fields with compat_ptr().  Also bounds-checks
 * nentries/num_counters against later countersize arithmetic.
 */
2167 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2168 void __user *user, unsigned int len)
2170 struct compat_ebt_replace tmp;
2173 if (len < sizeof(tmp))
2176 if (copy_from_user(&tmp, user, sizeof(tmp)))
2179 if (len != sizeof(tmp) + tmp.entries_size)
2182 if (tmp.entries_size == 0)
/* overflow guards for the per-cpu counter allocation done later */
2185 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2186 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2188 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2191 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2193 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2194 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2195 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2197 repl->num_counters = tmp.num_counters;
2198 repl->counters = compat_ptr(tmp.counters);
2199 repl->entries = compat_ptr(tmp.entries);
/* Compat EBT_SO_SET_ENTRIES: convert the 32-bit header, copy the user
 * entry blob, run compat_copy_entries() twice (size pass, then copy pass
 * into a freshly sized buffer), fix up the hook_entry pointers by the
 * computed jumps, and hand off to the common do_replace_finish().
 * Runs under xt_compat_lock(NFPROTO_BRIDGE) while the offset table is live.
 * NOTE(review): error/cleanup paths are partially elided in this chunk.
 */
2203 static int compat_do_replace(struct net *net, void __user *user,
2206 int ret, i, countersize, size64;
2207 struct ebt_table_info *newinfo;
2208 struct ebt_replace tmp;
2209 struct ebt_entries_buf_state state;
2212 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2214 /* try real handler in case userland supplied needed padding */
2215 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2220 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2221 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2226 memset(newinfo->counters, 0, countersize);
2228 memset(&state, 0, sizeof(state));
2230 newinfo->entries = vmalloc(tmp.entries_size);
2231 if (!newinfo->entries) {
2236 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2241 entries_tmp = newinfo->entries;
2243 xt_compat_lock(NFPROTO_BRIDGE);
/* pass 1: size only (state.buf_kern_start is NULL) */
2245 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2249 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2250 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2251 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2254 newinfo->entries = vmalloc(size64);
2255 if (!newinfo->entries) {
/* pass 2: same parse, now copying into the size64 buffer */
2261 memset(&state, 0, sizeof(state));
2262 state.buf_kern_start = newinfo->entries;
2263 state.buf_kern_len = size64;
2265 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2266 BUG_ON(ret < 0); /* parses same data again */
2269 tmp.entries_size = size64;
2271 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2272 char __user *usrptr;
2273 if (tmp.hook_entry[i]) {
/* shift each hook entry by the accumulated growth before its offset */
2275 usrptr = (char __user *) tmp.hook_entry[i];
2276 delta = usrptr - tmp.entries;
2277 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2278 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2282 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2283 xt_compat_unlock(NFPROTO_BRIDGE);
2285 ret = do_replace_finish(net, &tmp, newinfo);
2289 vfree(newinfo->entries);
2294 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2295 xt_compat_unlock(NFPROTO_BRIDGE);
/* Compat EBT_SO_SET_COUNTERS: read the 32-bit header; if the length does
 * not match the compat layout, retry via the native handler (userland may
 * have supplied 64-bit padding), else forward to do_update_counters().
 */
2299 static int compat_update_counters(struct net *net, void __user *user,
2302 struct compat_ebt_replace hlp;
2304 if (copy_from_user(&hlp, user, sizeof(hlp)))
2307 /* try real handler in case userland supplied needed padding */
2308 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2309 return update_counters(net, user, len);
2311 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2312 hlp.num_counters, user, len);
/* Compat setsockopt dispatch — mirrors do_ebt_set_ctl().
 * Requires CAP_NET_ADMIN.
 */
2315 static int compat_do_ebt_set_ctl(struct sock *sk,
2316 int cmd, void __user *user, unsigned int len)
2320 if (!capable(CAP_NET_ADMIN))
2324 case EBT_SO_SET_ENTRIES:
2325 ret = compat_do_replace(sock_net(sk), user, len);
2327 case EBT_SO_SET_COUNTERS:
2328 ret = compat_update_counters(sock_net(sk), user, len);
/* Compat getsockopt dispatch — mirrors do_ebt_get_ctl(), but holds
 * xt_compat_lock(NFPROTO_BRIDGE) while the offset table is in use and
 * falls back to the native handler where the layouts coincide.
 */
2336 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2337 void __user *user, int *len)
2340 struct compat_ebt_replace tmp;
2341 struct ebt_table *t;
2343 if (!capable(CAP_NET_ADMIN))
2346 /* try real handler in case userland supplied needed padding */
2347 if ((cmd == EBT_SO_GET_INFO ||
2348 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2349 return do_ebt_get_ctl(sk, cmd, user, len);
2351 if (copy_from_user(&tmp, user, sizeof(tmp)))
2354 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2358 xt_compat_lock(NFPROTO_BRIDGE);
2360 case EBT_SO_GET_INFO:
2361 tmp.nentries = t->private->nentries;
/* entries_size must be the compat size, hence compat_table_info() */
2362 ret = compat_table_info(t->private, &tmp);
2365 tmp.valid_hooks = t->valid_hooks;
2367 if (copy_to_user(user, &tmp, *len) != 0) {
2373 case EBT_SO_GET_INIT_INFO:
2374 tmp.nentries = t->table->nentries;
2375 tmp.entries_size = t->table->entries_size;
2376 tmp.valid_hooks = t->table->valid_hooks;
2378 if (copy_to_user(user, &tmp, *len) != 0) {
2384 case EBT_SO_GET_ENTRIES:
2385 case EBT_SO_GET_INIT_ENTRIES:
2387 * try real handler first in case of userland-side padding.
2388 * in case we are dealing with an 'ordinary' 32 bit binary
2389 * without 64bit compatibility padding, this will fail right
2390 * after copy_from_user when the *len argument is validated.
2392 * the compat_ variant needs to do one pass over the kernel
2393 * data set to adjust for size differences before it the check.
2395 if (copy_everything_to_user(t, user, len, cmd) == 0)
2398 ret = compat_copy_everything_to_user(t, user, len, cmd);
2404 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2405 xt_compat_unlock(NFPROTO_BRIDGE);
2406 mutex_unlock(&ebt_mutex);
/* Registration record for the ebtables get/setsockopt range, with compat
 * handlers wired in when CONFIG_COMPAT is enabled.
 */
2411 static struct nf_sockopt_ops ebt_sockopts =
2414 .set_optmin = EBT_BASE_CTL,
2415 .set_optmax = EBT_SO_SET_MAX + 1,
2416 .set = do_ebt_set_ctl,
2417 #ifdef CONFIG_COMPAT
2418 .compat_set = compat_do_ebt_set_ctl,
2420 .get_optmin = EBT_BASE_CTL,
2421 .get_optmax = EBT_SO_GET_MAX + 1,
2422 .get = do_ebt_get_ctl,
2423 #ifdef CONFIG_COMPAT
2424 .compat_get = compat_do_ebt_get_ctl,
2426 .owner = THIS_MODULE,
/* Module init: register the standard target with x_tables, then the
 * sockopt handlers; unwind the target registration if the latter fails.
 */
2429 static int __init ebtables_init(void)
2433 ret = xt_register_target(&ebt_standard_target);
2436 ret = nf_register_sockopt(&ebt_sockopts);
2438 xt_unregister_target(&ebt_standard_target);
2442 printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: undo ebtables_init() registrations in reverse order. */
2446 static void __exit ebtables_fini(void)
2448 nf_unregister_sockopt(&ebt_sockopts);
2449 xt_unregister_target(&ebt_standard_target);
2450 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Public API for table modules (ebtable_filter, ebtable_nat, ...). */
2453 EXPORT_SYMBOL(ebt_register_table);
2454 EXPORT_SYMBOL(ebt_unregister_table);
2455 EXPORT_SYMBOL(ebt_do_table);
2456 module_init(ebtables_init);
2457 module_exit(ebtables_fini);
2458 MODULE_LICENSE("GPL");