/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug print helpers: compiled out unless the corresponding DEBUG_*
 * macro above is enabled, so they cost nothing in production builds. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Assertion that only logs (does not BUG) when CONFIG_NETFILTER_DEBUG is on. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 static unsigned long ifname_compare(const char *_a, const char *_b,
93 const unsigned char *_mask)
95 const unsigned long *a = (const unsigned long *)_a;
96 const unsigned long *b = (const unsigned long *)_b;
97 const unsigned long *mask = (const unsigned long *)_mask;
100 ret = (a[0] ^ b[0]) & mask[0];
101 if (IFNAMSIZ > sizeof(unsigned long))
102 ret |= (a[1] ^ b[1]) & mask[1];
103 if (IFNAMSIZ > 2 * sizeof(unsigned long))
104 ret |= (a[2] ^ b[2]) & mask[2];
105 if (IFNAMSIZ > 3 * sizeof(unsigned long))
106 ret |= (a[3] ^ b[3]) & mask[3];
107 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
111 /* Returns whether matches rule or not. */
112 /* Performance critical - called for every packet */
114 ip6_packet_match(const struct sk_buff *skb,
117 const struct ip6t_ip6 *ip6info,
118 unsigned int *protoff,
119 int *fragoff, bool *hotdrop)
122 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
124 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
126 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
127 &ip6info->src), IP6T_INV_SRCIP)
128 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
129 &ip6info->dst), IP6T_INV_DSTIP)) {
130 dprintf("Source or dest mismatch.\n");
132 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
133 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
134 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
135 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
136 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
137 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
141 ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask);
143 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
144 dprintf("VIA in mismatch (%s vs %s).%s\n",
145 indev, ip6info->iniface,
146 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
150 ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask);
152 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
153 dprintf("VIA out mismatch (%s vs %s).%s\n",
154 outdev, ip6info->outiface,
155 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
159 /* ... might want to do something with class and flowlabel here ... */
161 /* look for the desired protocol header */
162 if((ip6info->flags & IP6T_F_PROTO)) {
164 unsigned short _frag_off;
166 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
172 *fragoff = _frag_off;
174 dprintf("Packet protocol %hi ?= %s%hi.\n",
176 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
179 if (ip6info->proto == protohdr) {
180 if(ip6info->invflags & IP6T_INV_PROTO) {
186 /* We need match for the '-p all', too! */
187 if ((ip6info->proto != 0) &&
188 !(ip6info->invflags & IP6T_INV_PROTO))
194 /* should be ip6 safe */
196 ip6_checkentry(const struct ip6t_ip6 *ipv6)
198 if (ipv6->flags & ~IP6T_F_MASK) {
199 duprintf("Unknown flag bits set: %08X\n",
200 ipv6->flags & ~IP6T_F_MASK);
203 if (ipv6->invflags & ~IP6T_INV_MASK) {
204 duprintf("Unknown invflag bits set: %08X\n",
205 ipv6->invflags & ~IP6T_INV_MASK);
212 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
215 printk("ip6_tables: error: `%s'\n",
216 (const char *)par->targinfo);
221 /* Performance critical - called for every packet */
223 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
224 struct xt_match_param *par)
226 par->match = m->u.kernel.match;
227 par->matchinfo = m->data;
229 /* Stop iteration if it doesn't match */
230 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
242 /* All zeroes == unconditional rule. */
243 /* Mildly perf critical (only if packet tracing is on) */
245 unconditional(const struct ip6t_ip6 *ipv6)
249 for (i = 0; i < sizeof(*ipv6); i++)
250 if (((char *)ipv6)[i])
253 return (i == sizeof(*ipv6));
256 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
257 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
258 /* This cries for unification! */
259 static const char *const hooknames[] = {
260 [NF_INET_PRE_ROUTING] = "PREROUTING",
261 [NF_INET_LOCAL_IN] = "INPUT",
262 [NF_INET_FORWARD] = "FORWARD",
263 [NF_INET_LOCAL_OUT] = "OUTPUT",
264 [NF_INET_POST_ROUTING] = "POSTROUTING",
267 enum nf_ip_trace_comments {
268 NF_IP6_TRACE_COMMENT_RULE,
269 NF_IP6_TRACE_COMMENT_RETURN,
270 NF_IP6_TRACE_COMMENT_POLICY,
273 static const char *const comments[] = {
274 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
275 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
276 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
279 static struct nf_loginfo trace_loginfo = {
280 .type = NF_LOG_TYPE_LOG,
284 .logflags = NF_LOG_MASK,
289 /* Mildly perf critical (only if packet tracing is on) */
291 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
292 char *hookname, char **chainname,
293 char **comment, unsigned int *rulenum)
295 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
297 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
298 /* Head of user chain: ERROR target with chainname */
299 *chainname = t->target.data;
304 if (s->target_offset == sizeof(struct ip6t_entry)
305 && strcmp(t->target.u.kernel.target->name,
306 IP6T_STANDARD_TARGET) == 0
308 && unconditional(&s->ipv6)) {
309 /* Tail of chains: STANDARD target (return/policy) */
310 *comment = *chainname == hookname
311 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
312 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
321 static void trace_packet(struct sk_buff *skb,
323 const struct net_device *in,
324 const struct net_device *out,
325 const char *tablename,
326 struct xt_table_info *private,
327 struct ip6t_entry *e)
330 const struct ip6t_entry *root;
331 char *hookname, *chainname, *comment;
332 unsigned int rulenum = 0;
334 table_base = (void *)private->entries[smp_processor_id()];
335 root = get_entry(table_base, private->hook_entry[hook]);
337 hookname = chainname = (char *)hooknames[hook];
338 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
340 IP6T_ENTRY_ITERATE(root,
341 private->size - private->hook_entry[hook],
342 get_chainname_rulenum,
343 e, hookname, &chainname, &comment, &rulenum);
345 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
346 "TRACE: %s:%s:%s:%u ",
347 tablename, chainname, comment, rulenum);
351 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
353 ip6t_do_table(struct sk_buff *skb,
355 const struct net_device *in,
356 const struct net_device *out,
357 struct xt_table *table)
359 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
360 bool hotdrop = false;
361 /* Initializing verdict to NF_DROP keeps gcc happy. */
362 unsigned int verdict = NF_DROP;
363 const char *indev, *outdev;
365 struct ip6t_entry *e, *back;
366 struct xt_table_info *private;
367 struct xt_match_param mtpar;
368 struct xt_target_param tgpar;
371 indev = in ? in->name : nulldevname;
372 outdev = out ? out->name : nulldevname;
373 /* We handle fragments by dealing with the first fragment as
374 * if it was a normal packet. All other fragments are treated
375 * normally, except that they will NEVER match rules that ask
376 * things we don't know, ie. tcp syn flag or ports). If the
377 * rule is also a fragment-specific rule, non-fragments won't
379 mtpar.hotdrop = &hotdrop;
380 mtpar.in = tgpar.in = in;
381 mtpar.out = tgpar.out = out;
382 mtpar.family = tgpar.family = NFPROTO_IPV6;
383 tgpar.hooknum = hook;
385 read_lock_bh(&table->lock);
386 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
387 private = table->private;
388 table_base = (void *)private->entries[smp_processor_id()];
389 e = get_entry(table_base, private->hook_entry[hook]);
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
397 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
398 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
399 struct ip6t_entry_target *t;
401 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
404 ADD_COUNTER(e->counters,
405 ntohs(ipv6_hdr(skb)->payload_len) +
406 sizeof(struct ipv6hdr), 1);
408 t = ip6t_get_target(e);
409 IP_NF_ASSERT(t->u.kernel.target);
411 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
412 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
413 /* The packet is traced: log it */
414 if (unlikely(skb->nf_trace))
415 trace_packet(skb, hook, in, out,
416 table->name, private, e);
418 /* Standard target? */
419 if (!t->u.kernel.target->target) {
422 v = ((struct ip6t_standard_target *)t)->verdict;
424 /* Pop from stack? */
425 if (v != IP6T_RETURN) {
426 verdict = (unsigned)(-v) - 1;
430 back = get_entry(table_base,
434 if (table_base + v != (void *)e + e->next_offset
435 && !(e->ipv6.flags & IP6T_F_GOTO)) {
436 /* Save old back ptr in next entry */
437 struct ip6t_entry *next
438 = (void *)e + e->next_offset;
440 = (void *)back - table_base;
441 /* set back pointer to next entry */
445 e = get_entry(table_base, v);
447 /* Targets which reenter must return
449 tgpar.target = t->u.kernel.target;
450 tgpar.targinfo = t->data;
452 #ifdef CONFIG_NETFILTER_DEBUG
453 ((struct ip6t_entry *)table_base)->comefrom
456 verdict = t->u.kernel.target->target(skb,
459 #ifdef CONFIG_NETFILTER_DEBUG
460 if (((struct ip6t_entry *)table_base)->comefrom
462 && verdict == IP6T_CONTINUE) {
463 printk("Target %s reentered!\n",
464 t->u.kernel.target->name);
467 ((struct ip6t_entry *)table_base)->comefrom
470 if (verdict == IP6T_CONTINUE)
471 e = (void *)e + e->next_offset;
479 e = (void *)e + e->next_offset;
483 #ifdef CONFIG_NETFILTER_DEBUG
484 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
486 read_unlock_bh(&table->lock);
488 #ifdef DEBUG_ALLOW_ALL
497 /* Figures out from what hook each rule can be called: returns 0 if
498 there are loops. Puts hook bitmask in comefrom. */
500 mark_source_chains(struct xt_table_info *newinfo,
501 unsigned int valid_hooks, void *entry0)
505 /* No recursion; use packet counter to save back ptrs (reset
506 to 0 as we leave), and comefrom to save source hook bitmask */
507 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
508 unsigned int pos = newinfo->hook_entry[hook];
509 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
511 if (!(valid_hooks & (1 << hook)))
514 /* Set initial back pointer. */
515 e->counters.pcnt = pos;
518 struct ip6t_standard_target *t
519 = (void *)ip6t_get_target(e);
520 int visited = e->comefrom & (1 << hook);
522 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
523 printk("iptables: loop hook %u pos %u %08X.\n",
524 hook, pos, e->comefrom);
527 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
529 /* Unconditional return/END. */
530 if ((e->target_offset == sizeof(struct ip6t_entry)
531 && (strcmp(t->target.u.user.name,
532 IP6T_STANDARD_TARGET) == 0)
534 && unconditional(&e->ipv6)) || visited) {
535 unsigned int oldpos, size;
537 if (t->verdict < -NF_MAX_VERDICT - 1) {
538 duprintf("mark_source_chains: bad "
539 "negative verdict (%i)\n",
544 /* Return: backtrack through the last
547 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
548 #ifdef DEBUG_IP_FIREWALL_USER
550 & (1 << NF_INET_NUMHOOKS)) {
551 duprintf("Back unset "
558 pos = e->counters.pcnt;
559 e->counters.pcnt = 0;
561 /* We're at the start. */
565 e = (struct ip6t_entry *)
567 } while (oldpos == pos + e->next_offset);
570 size = e->next_offset;
571 e = (struct ip6t_entry *)
572 (entry0 + pos + size);
573 e->counters.pcnt = pos;
576 int newpos = t->verdict;
578 if (strcmp(t->target.u.user.name,
579 IP6T_STANDARD_TARGET) == 0
581 if (newpos > newinfo->size -
582 sizeof(struct ip6t_entry)) {
583 duprintf("mark_source_chains: "
584 "bad verdict (%i)\n",
588 /* This a jump; chase it. */
589 duprintf("Jump rule %u -> %u\n",
592 /* ... this is a fallthru */
593 newpos = pos + e->next_offset;
595 e = (struct ip6t_entry *)
597 e->counters.pcnt = pos;
602 duprintf("Finished chain %u\n", hook);
608 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
610 struct xt_mtdtor_param par;
612 if (i && (*i)-- == 0)
615 par.match = m->u.kernel.match;
616 par.matchinfo = m->data;
617 par.family = NFPROTO_IPV6;
618 if (par.match->destroy != NULL)
619 par.match->destroy(&par);
620 module_put(par.match->me);
625 check_entry(struct ip6t_entry *e, const char *name)
627 struct ip6t_entry_target *t;
629 if (!ip6_checkentry(&e->ipv6)) {
630 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
634 if (e->target_offset + sizeof(struct ip6t_entry_target) >
638 t = ip6t_get_target(e);
639 if (e->target_offset + t->u.target_size > e->next_offset)
645 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
648 const struct ip6t_ip6 *ipv6 = par->entryinfo;
651 par->match = m->u.kernel.match;
652 par->matchinfo = m->data;
654 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
655 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
657 duprintf("ip_tables: check failed for `%s'.\n",
666 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
669 struct xt_match *match;
672 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
674 "ip6t_%s", m->u.user.name);
675 if (IS_ERR(match) || !match) {
676 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
677 return match ? PTR_ERR(match) : -ENOENT;
679 m->u.kernel.match = match;
681 ret = check_match(m, par, i);
687 module_put(m->u.kernel.match->me);
691 static int check_target(struct ip6t_entry *e, const char *name)
693 struct ip6t_entry_target *t = ip6t_get_target(e);
694 struct xt_tgchk_param par = {
697 .target = t->u.kernel.target,
699 .hook_mask = e->comefrom,
700 .family = NFPROTO_IPV6,
704 t = ip6t_get_target(e);
705 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
706 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
708 duprintf("ip_tables: check failed for `%s'.\n",
709 t->u.kernel.target->name);
716 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
719 struct ip6t_entry_target *t;
720 struct xt_target *target;
723 struct xt_mtchk_param mtpar;
725 ret = check_entry(e, name);
731 mtpar.entryinfo = &e->ipv6;
732 mtpar.hook_mask = e->comefrom;
733 mtpar.family = NFPROTO_IPV6;
734 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
736 goto cleanup_matches;
738 t = ip6t_get_target(e);
739 target = try_then_request_module(xt_find_target(AF_INET6,
742 "ip6t_%s", t->u.user.name);
743 if (IS_ERR(target) || !target) {
744 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
745 ret = target ? PTR_ERR(target) : -ENOENT;
746 goto cleanup_matches;
748 t->u.kernel.target = target;
750 ret = check_target(e, name);
757 module_put(t->u.kernel.target->me);
759 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
764 check_entry_size_and_hooks(struct ip6t_entry *e,
765 struct xt_table_info *newinfo,
767 unsigned char *limit,
768 const unsigned int *hook_entries,
769 const unsigned int *underflows,
774 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
775 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
776 duprintf("Bad offset %p\n", e);
781 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
782 duprintf("checking: element %p size %u\n",
787 /* Check hooks & underflows */
788 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
789 if ((unsigned char *)e - base == hook_entries[h])
790 newinfo->hook_entry[h] = hook_entries[h];
791 if ((unsigned char *)e - base == underflows[h])
792 newinfo->underflow[h] = underflows[h];
795 /* FIXME: underflows must be unconditional, standard verdicts
796 < 0 (not IP6T_RETURN). --RR */
798 /* Clear counters and comefrom */
799 e->counters = ((struct xt_counters) { 0, 0 });
807 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
809 struct xt_tgdtor_param par;
810 struct ip6t_entry_target *t;
812 if (i && (*i)-- == 0)
815 /* Cleanup all matches */
816 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
817 t = ip6t_get_target(e);
819 par.target = t->u.kernel.target;
820 par.targinfo = t->data;
821 par.family = NFPROTO_IPV6;
822 if (par.target->destroy != NULL)
823 par.target->destroy(&par);
824 module_put(par.target->me);
828 /* Checks and translates the user-supplied table segment (held in
831 translate_table(const char *name,
832 unsigned int valid_hooks,
833 struct xt_table_info *newinfo,
837 const unsigned int *hook_entries,
838 const unsigned int *underflows)
843 newinfo->size = size;
844 newinfo->number = number;
846 /* Init all hooks to impossible value. */
847 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
848 newinfo->hook_entry[i] = 0xFFFFFFFF;
849 newinfo->underflow[i] = 0xFFFFFFFF;
852 duprintf("translate_table: size %u\n", newinfo->size);
854 /* Walk through entries, checking offsets. */
855 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
856 check_entry_size_and_hooks,
860 hook_entries, underflows, &i);
865 duprintf("translate_table: %u not %u entries\n",
870 /* Check hooks all assigned */
871 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
872 /* Only hooks which are valid */
873 if (!(valid_hooks & (1 << i)))
875 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
876 duprintf("Invalid hook entry %u %u\n",
880 if (newinfo->underflow[i] == 0xFFFFFFFF) {
881 duprintf("Invalid underflow %u %u\n",
887 if (!mark_source_chains(newinfo, valid_hooks, entry0))
890 /* Finally, each sanity check must pass */
892 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
893 find_check_entry, name, size, &i);
896 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
901 /* And one copy for every other CPU */
902 for_each_possible_cpu(i) {
903 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
904 memcpy(newinfo->entries[i], entry0, newinfo->size);
912 add_entry_to_counter(const struct ip6t_entry *e,
913 struct xt_counters total[],
916 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
923 set_entry_to_counter(const struct ip6t_entry *e,
924 struct ip6t_counters total[],
927 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
934 get_counters(const struct xt_table_info *t,
935 struct xt_counters counters[])
941 /* Instead of clearing (by a previous call to memset())
942 * the counters and using adds, we set the counters
943 * with data used by 'current' CPU
944 * We dont care about preemption here.
946 curcpu = raw_smp_processor_id();
949 IP6T_ENTRY_ITERATE(t->entries[curcpu],
951 set_entry_to_counter,
955 for_each_possible_cpu(cpu) {
959 IP6T_ENTRY_ITERATE(t->entries[cpu],
961 add_entry_to_counter,
967 static struct xt_counters *alloc_counters(struct xt_table *table)
969 unsigned int countersize;
970 struct xt_counters *counters;
971 const struct xt_table_info *private = table->private;
973 /* We need atomic snapshot of counters: rest doesn't change
974 (other than comefrom, which userspace doesn't care
976 countersize = sizeof(struct xt_counters) * private->number;
977 counters = vmalloc_node(countersize, numa_node_id());
979 if (counters == NULL)
980 return ERR_PTR(-ENOMEM);
982 /* First, sum counters... */
983 write_lock_bh(&table->lock);
984 get_counters(private, counters);
985 write_unlock_bh(&table->lock);
991 copy_entries_to_user(unsigned int total_size,
992 struct xt_table *table,
993 void __user *userptr)
995 unsigned int off, num;
996 struct ip6t_entry *e;
997 struct xt_counters *counters;
998 const struct xt_table_info *private = table->private;
1000 const void *loc_cpu_entry;
1002 counters = alloc_counters(table);
1003 if (IS_ERR(counters))
1004 return PTR_ERR(counters);
1006 /* choose the copy that is on our node/cpu, ...
1007 * This choice is lazy (because current thread is
1008 * allowed to migrate to another cpu)
1010 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1011 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1016 /* FIXME: use iterator macros --RR */
1017 /* ... then go back and fix counters and names */
1018 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1020 const struct ip6t_entry_match *m;
1021 const struct ip6t_entry_target *t;
1023 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1024 if (copy_to_user(userptr + off
1025 + offsetof(struct ip6t_entry, counters),
1027 sizeof(counters[num])) != 0) {
1032 for (i = sizeof(struct ip6t_entry);
1033 i < e->target_offset;
1034 i += m->u.match_size) {
1037 if (copy_to_user(userptr + off + i
1038 + offsetof(struct ip6t_entry_match,
1040 m->u.kernel.match->name,
1041 strlen(m->u.kernel.match->name)+1)
1048 t = ip6t_get_target(e);
1049 if (copy_to_user(userptr + off + e->target_offset
1050 + offsetof(struct ip6t_entry_target,
1052 t->u.kernel.target->name,
1053 strlen(t->u.kernel.target->name)+1) != 0) {
1064 #ifdef CONFIG_COMPAT
1065 static void compat_standard_from_user(void *dst, void *src)
1067 int v = *(compat_int_t *)src;
1070 v += xt_compat_calc_jump(AF_INET6, v);
1071 memcpy(dst, &v, sizeof(v));
1074 static int compat_standard_to_user(void __user *dst, void *src)
1076 compat_int_t cv = *(int *)src;
1079 cv -= xt_compat_calc_jump(AF_INET6, cv);
1080 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1084 compat_calc_match(struct ip6t_entry_match *m, int *size)
1086 *size += xt_compat_match_offset(m->u.kernel.match);
1090 static int compat_calc_entry(struct ip6t_entry *e,
1091 const struct xt_table_info *info,
1092 void *base, struct xt_table_info *newinfo)
1094 struct ip6t_entry_target *t;
1095 unsigned int entry_offset;
1098 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1099 entry_offset = (void *)e - base;
1100 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1101 t = ip6t_get_target(e);
1102 off += xt_compat_target_offset(t->u.kernel.target);
1103 newinfo->size -= off;
1104 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1108 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1109 if (info->hook_entry[i] &&
1110 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1111 newinfo->hook_entry[i] -= off;
1112 if (info->underflow[i] &&
1113 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1114 newinfo->underflow[i] -= off;
1119 static int compat_table_info(const struct xt_table_info *info,
1120 struct xt_table_info *newinfo)
1122 void *loc_cpu_entry;
1124 if (!newinfo || !info)
1127 /* we dont care about newinfo->entries[] */
1128 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1129 newinfo->initial_entries = 0;
1130 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1131 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1132 compat_calc_entry, info, loc_cpu_entry,
1137 static int get_info(struct net *net, void __user *user, int *len, int compat)
1139 char name[IP6T_TABLE_MAXNAMELEN];
1143 if (*len != sizeof(struct ip6t_getinfo)) {
1144 duprintf("length %u != %zu\n", *len,
1145 sizeof(struct ip6t_getinfo));
1149 if (copy_from_user(name, user, sizeof(name)) != 0)
1152 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1153 #ifdef CONFIG_COMPAT
1155 xt_compat_lock(AF_INET6);
1157 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1158 "ip6table_%s", name);
1159 if (t && !IS_ERR(t)) {
1160 struct ip6t_getinfo info;
1161 const struct xt_table_info *private = t->private;
1163 #ifdef CONFIG_COMPAT
1165 struct xt_table_info tmp;
1166 ret = compat_table_info(private, &tmp);
1167 xt_compat_flush_offsets(AF_INET6);
1171 info.valid_hooks = t->valid_hooks;
1172 memcpy(info.hook_entry, private->hook_entry,
1173 sizeof(info.hook_entry));
1174 memcpy(info.underflow, private->underflow,
1175 sizeof(info.underflow));
1176 info.num_entries = private->number;
1177 info.size = private->size;
1178 strcpy(info.name, name);
1180 if (copy_to_user(user, &info, *len) != 0)
1188 ret = t ? PTR_ERR(t) : -ENOENT;
1189 #ifdef CONFIG_COMPAT
1191 xt_compat_unlock(AF_INET6);
1197 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1200 struct ip6t_get_entries get;
1203 if (*len < sizeof(get)) {
1204 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1207 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1209 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1210 duprintf("get_entries: %u != %zu\n",
1211 *len, sizeof(get) + get.size);
1215 t = xt_find_table_lock(net, AF_INET6, get.name);
1216 if (t && !IS_ERR(t)) {
1217 struct xt_table_info *private = t->private;
1218 duprintf("t->private->number = %u\n", private->number);
1219 if (get.size == private->size)
1220 ret = copy_entries_to_user(private->size,
1221 t, uptr->entrytable);
1223 duprintf("get_entries: I've got %u not %u!\n",
1224 private->size, get.size);
1230 ret = t ? PTR_ERR(t) : -ENOENT;
1236 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1237 struct xt_table_info *newinfo, unsigned int num_counters,
1238 void __user *counters_ptr)
1242 struct xt_table_info *oldinfo;
1243 struct xt_counters *counters;
1244 const void *loc_cpu_old_entry;
1247 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1254 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1255 "ip6table_%s", name);
1256 if (!t || IS_ERR(t)) {
1257 ret = t ? PTR_ERR(t) : -ENOENT;
1258 goto free_newinfo_counters_untrans;
1262 if (valid_hooks != t->valid_hooks) {
1263 duprintf("Valid hook crap: %08X vs %08X\n",
1264 valid_hooks, t->valid_hooks);
1269 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1273 /* Update module usage count based on number of rules */
1274 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1275 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1276 if ((oldinfo->number > oldinfo->initial_entries) ||
1277 (newinfo->number <= oldinfo->initial_entries))
1279 if ((oldinfo->number > oldinfo->initial_entries) &&
1280 (newinfo->number <= oldinfo->initial_entries))
1283 /* Get the old counters. */
1284 get_counters(oldinfo, counters);
1285 /* Decrease module usage counts and free resource */
1286 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1287 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1289 xt_free_table_info(oldinfo);
1290 if (copy_to_user(counters_ptr, counters,
1291 sizeof(struct xt_counters) * num_counters) != 0)
1300 free_newinfo_counters_untrans:
1307 do_replace(struct net *net, void __user *user, unsigned int len)
1310 struct ip6t_replace tmp;
1311 struct xt_table_info *newinfo;
1312 void *loc_cpu_entry;
1314 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1317 /* overflow check */
1318 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1321 newinfo = xt_alloc_table_info(tmp.size);
1325 /* choose the copy that is on our node/cpu */
1326 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1327 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1333 ret = translate_table(tmp.name, tmp.valid_hooks,
1334 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1335 tmp.hook_entry, tmp.underflow);
1339 duprintf("ip_tables: Translated table\n");
1341 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1342 tmp.num_counters, tmp.counters);
1344 goto free_newinfo_untrans;
1347 free_newinfo_untrans:
1348 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1350 xt_free_table_info(newinfo);
1354 /* We're lazy, and add to the first CPU; overflow works its fey magic
1355 * and everything is OK. */
1357 add_counter_to_entry(struct ip6t_entry *e,
1358 const struct xt_counters addme[],
1362 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1364 (long unsigned int)e->counters.pcnt,
1365 (long unsigned int)e->counters.bcnt,
1366 (long unsigned int)addme[*i].pcnt,
1367 (long unsigned int)addme[*i].bcnt);
1370 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1377 do_add_counters(struct net *net, void __user *user, unsigned int len,
1381 struct xt_counters_info tmp;
1382 struct xt_counters *paddc;
1383 unsigned int num_counters;
1388 const struct xt_table_info *private;
1390 const void *loc_cpu_entry;
1391 #ifdef CONFIG_COMPAT
1392 struct compat_xt_counters_info compat_tmp;
1396 size = sizeof(struct compat_xt_counters_info);
1401 size = sizeof(struct xt_counters_info);
1404 if (copy_from_user(ptmp, user, size) != 0)
1407 #ifdef CONFIG_COMPAT
1409 num_counters = compat_tmp.num_counters;
1410 name = compat_tmp.name;
1414 num_counters = tmp.num_counters;
1418 if (len != size + num_counters * sizeof(struct xt_counters))
1421 paddc = vmalloc_node(len - size, numa_node_id());
1425 if (copy_from_user(paddc, user + size, len - size) != 0) {
1430 t = xt_find_table_lock(net, AF_INET6, name);
1431 if (!t || IS_ERR(t)) {
1432 ret = t ? PTR_ERR(t) : -ENOENT;
1436 write_lock_bh(&t->lock);
1437 private = t->private;
1438 if (private->number != num_counters) {
1440 goto unlock_up_free;
1444 /* Choose the copy that is on our node */
1445 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1446 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1448 add_counter_to_entry,
1452 write_unlock_bh(&t->lock);
1461 #ifdef CONFIG_COMPAT
1462 struct compat_ip6t_replace {
1463 char name[IP6T_TABLE_MAXNAMELEN];
1467 u32 hook_entry[NF_INET_NUMHOOKS];
1468 u32 underflow[NF_INET_NUMHOOKS];
1470 compat_uptr_t counters; /* struct ip6t_counters * */
1471 struct compat_ip6t_entry entries[0];
/*
 * Serialize one kernel ip6t_entry (header, matches, target) into the
 * 32-bit compat layout at *dstptr in user memory, advancing *dstptr
 * and shrinking *size by the native-vs-compat size differences.  The
 * entry's packet/byte counters come from counters[*i].
 * NOTE(review): failure-path lines are elided in this excerpt.
 */
1475 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1476 unsigned int *size, struct xt_counters *counters,
1479 struct ip6t_entry_target *t;
1480 struct compat_ip6t_entry __user *ce;
1481 u_int16_t target_offset, next_offset;
1482 compat_uint_t origsize;
1487 ce = (struct compat_ip6t_entry __user *)*dstptr;
/* Copy the entry header first; fields fixed up with put_user below. */
1488 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1491 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1494 *dstptr += sizeof(struct compat_ip6t_entry);
1495 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match writes its own compat form and advances dstptr/size. */
1497 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Rebase intra-entry offsets to the (smaller) compat layout:
 * (origsize - *size) is the total shrinkage accumulated so far. */
1498 target_offset = e->target_offset - (origsize - *size);
1501 t = ip6t_get_target(e);
1502 ret = xt_compat_target_to_user(t, dstptr, size);
1506 next_offset = e->next_offset - (origsize - *size);
1507 if (put_user(target_offset, &ce->target_offset))
1509 if (put_user(next_offset, &ce->next_offset))
/*
 * Resolve the match module named in compat entry-match *m, loading
 * "ip6t_<name>" on demand.  On success the kernel match is cached in
 * m->u.kernel.match (module refcount held) and *size is grown by the
 * native-vs-compat offset for this match.
 */
1519 compat_find_calc_match(struct ip6t_entry_match *m,
1521 const struct ip6t_ip6 *ipv6,
1522 unsigned int hookmask,
1523 int *size, unsigned int *i)
1525 struct xt_match *match;
1527 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1528 m->u.user.revision),
1529 "ip6t_%s", m->u.user.name);
1530 if (IS_ERR(match) || !match) {
1531 duprintf("compat_check_calc_match: `%s' not found\n",
/* Propagate the lookup error, or -ENOENT if lookup returned NULL. */
1533 return match ? PTR_ERR(match) : -ENOENT;
1535 m->u.kernel.match = match;
/* Account for the size delta between the compat and native blobs. */
1536 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken on a match during compat checking.
 * When i is non-NULL, only the first *i matches are released — used
 * to unwind a partially validated entry (stops once *i hits zero).
 */
1543 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1545 if (i && (*i)-- == 0)
1548 module_put(m->u.kernel.match->me);
/*
 * Undo check_compat_entry_size_and_hooks() for one compat entry:
 * release every match's module reference, then the target's.  A
 * non-NULL i limits the unwind to the first *i entries.
 */
1553 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1555 struct ip6t_entry_target *t;
1557 if (i && (*i)-- == 0)
1560 /* Cleanup all matches */
1561 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1562 t = compat_ip6t_get_target(e);
1563 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry from the user blob:
 * checks alignment and bounds, resolves (and pins) every match and
 * the target, records the compat->native size delta for this entry
 * offset, and notes which hooks/underflows begin here.  On failure
 * the already-pinned modules are released via the labels at the end.
 * NOTE(review): several error-return lines are elided in this excerpt.
 */
1568 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1569 struct xt_table_info *newinfo,
1571 unsigned char *base,
1572 unsigned char *limit,
1573 unsigned int *hook_entries,
1574 unsigned int *underflows,
1578 struct ip6t_entry_target *t;
1579 struct xt_target *target;
1580 unsigned int entry_offset;
1584 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would run past
 * the end of the user-supplied blob. */
1585 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1586 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1587 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its own header plus a target header. */
1591 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1592 sizeof(struct compat_xt_entry_target)) {
1593 duprintf("checking: element %p size %u\n",
1598 /* For purposes of check_entry casting the compat entry is fine */
1599 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native form of this entry is. */
1603 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1604 entry_offset = (void *)e - (void *)base;
1606 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1607 &e->ipv6, e->comefrom, &off, &j);
1609 goto release_matches;
1611 t = compat_ip6t_get_target(e);
1612 target = try_then_request_module(xt_find_target(AF_INET6,
1614 t->u.user.revision),
1615 "ip6t_%s", t->u.user.name);
1616 if (IS_ERR(target) || !target) {
1617 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1619 ret = target ? PTR_ERR(target) : -ENOENT;
1620 goto release_matches;
1622 t->u.kernel.target = target;
1624 off += xt_compat_target_offset(target);
/* Remember the compat->native delta for this entry's offset so the
 * second pass can relocate hook offsets. */
1626 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1630 /* Check hooks & underflows */
1631 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1632 if ((unsigned char *)e - base == hook_entries[h])
1633 newinfo->hook_entry[h] = hook_entries[h];
1634 if ((unsigned char *)e - base == underflows[h])
1635 newinfo->underflow[h] = underflows[h];
1638 /* Clear counters and comefrom */
1639 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then the j matches pinned above. */
1646 module_put(t->u.kernel.target->me);
1648 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second pass of compat translation: expand one validated compat
 * entry into its native ip6t_entry form at *dstptr (kernel memory),
 * letting each match/target convert itself, and rebase the entry's
 * internal offsets plus any hook/underflow offsets that lie beyond
 * this entry by the size growth (origsize - *size is negative growth
 * accounting; *size grows here).
 */
1653 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1654 unsigned int *size, const char *name,
1655 struct xt_table_info *newinfo, unsigned char *base)
1657 struct ip6t_entry_target *t;
1658 struct xt_target *target;
1659 struct ip6t_entry *de;
1660 unsigned int origsize;
1665 de = (struct ip6t_entry *)*dstptr;
/* Copy the header, then the counters separately (layouts differ). */
1666 memcpy(de, e, sizeof(struct ip6t_entry));
1667 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1669 *dstptr += sizeof(struct ip6t_entry);
1670 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1672 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1676 de->target_offset = e->target_offset - (origsize - *size);
1677 t = compat_ip6t_get_target(e);
1678 target = t->u.kernel.target;
1679 xt_compat_target_from_user(t, dstptr, size);
1681 de->next_offset = e->next_offset - (origsize - *size);
/* Hooks that start after this entry shift by the accumulated growth. */
1682 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1683 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1684 newinfo->hook_entry[h] -= origsize - *size;
1685 if ((unsigned char *)de - base < newinfo->underflow[h])
1686 newinfo->underflow[h] -= origsize - *size;
/*
 * Final semantic check of a translated (now native-layout) entry:
 * run each match's checkentry via check_match() with an
 * xt_mtchk_param describing this entry's IPv6 header info and hook
 * mask, then validate the target.  On failure the first j matches
 * are cleaned up again.
 */
1691 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1696 struct xt_mtchk_param mtpar;
1700 mtpar.entryinfo = &e->ipv6;
1701 mtpar.hook_mask = e->comefrom;
1702 mtpar.family = NFPROTO_IPV6;
1703 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1705 goto cleanup_matches;
1707 ret = check_target(e, name);
1709 goto cleanup_matches;
/* Unwind path: release the j matches that passed check_match. */
1715 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Convert a whole 32-bit compat ruleset into a native xt_table_info.
 * Two passes under the per-family compat lock: (1) validate every
 * compat entry and compute per-entry size deltas, (2) allocate the
 * native table and expand each entry into it.  Afterwards the chain
 * structure is verified (mark_source_chains) and each entry gets its
 * final semantic check.  On success *pinfo/the entry pointer are
 * switched to the new table and the scratch info is freed.
 * NOTE(review): some error-return and assignment lines are elided in
 * this excerpt (e.g. the allocation of 'info' and several gotos).
 */
1720 translate_compat_table(const char *name,
1721 unsigned int valid_hooks,
1722 struct xt_table_info **pinfo,
1724 unsigned int total_size,
1725 unsigned int number,
1726 unsigned int *hook_entries,
1727 unsigned int *underflows)
1730 struct xt_table_info *newinfo, *info;
1731 void *pos, *entry0, *entry1;
1738 info->number = number;
1740 /* Init all hooks to impossible value. */
1741 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1742 info->hook_entry[i] = 0xFFFFFFFF;
1743 info->underflow[i] = 0xFFFFFFFF;
1746 duprintf("translate_compat_table: size %u\n", info->size);
/* Serialize against other compat translations for this family. */
1748 xt_compat_lock(AF_INET6);
1749 /* Walk through entries, checking offsets. */
1750 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1751 check_compat_entry_size_and_hooks,
1752 info, &size, entry0,
1753 entry0 + total_size,
1754 hook_entries, underflows, &j, name);
1760 duprintf("translate_compat_table: %u not %u entries\n",
1765 /* Check hooks all assigned */
1766 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1767 /* Only hooks which are valid */
1768 if (!(valid_hooks & (1 << i)))
1770 if (info->hook_entry[i] == 0xFFFFFFFF) {
1771 duprintf("Invalid hook entry %u %u\n",
1772 i, hook_entries[i]);
1775 if (info->underflow[i] == 0xFFFFFFFF) {
1776 duprintf("Invalid underflow %u %u\n",
/* Pass 2: build the native-layout table (size includes the deltas). */
1783 newinfo = xt_alloc_table_info(size);
1787 newinfo->number = number;
1788 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1789 newinfo->hook_entry[i] = info->hook_entry[i];
1790 newinfo->underflow[i] = info->underflow[i];
1792 entry1 = newinfo->entries[raw_smp_processor_id()];
1795 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1796 compat_copy_entry_from_user,
1797 &pos, &size, name, newinfo, entry1);
/* Offset map is only needed during translation — discard it. */
1798 xt_compat_flush_offsets(AF_INET6);
1799 xt_compat_unlock(AF_INET6);
1804 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1808 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Partial-failure unwind: release the i entries already checked in
 * compat form, clean up the rest in native form. */
1812 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1813 compat_release_entry, &j);
1814 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1815 xt_free_table_info(newinfo);
1819 /* And one copy for every other CPU */
1820 for_each_possible_cpu(i)
1821 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1822 memcpy(newinfo->entries[i], entry1, newinfo->size);
1826 xt_free_table_info(info);
/* Error paths: free the half-built table and drop module refs. */
1830 xt_free_table_info(newinfo);
1832 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1835 xt_compat_flush_offsets(AF_INET6);
1836 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and rule blob, translate them to native layout via
 * translate_compat_table(), then install the new table with
 * __do_replace() (which also returns old counters to userland).
 * NOTE(review): several early-return lines are elided in this excerpt.
 */
1841 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1844 struct compat_ip6t_replace tmp;
1845 struct xt_table_info *newinfo;
1846 void *loc_cpu_entry;
1848 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 /* overflow check */
/* Guard the per-CPU and counter allocations against size overflow. */
1852 if (tmp.size >= INT_MAX / num_possible_cpus())
1854 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1857 newinfo = xt_alloc_table_info(tmp.size);
1861 /* choose the copy that is on our node/cpu */
1862 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1863 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1869 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1870 &newinfo, &loc_cpu_entry, tmp.size,
1871 tmp.num_entries, tmp.hook_entry,
1876 duprintf("compat_do_replace: Translated table\n");
/* compat_ptr() widens the 32-bit user counters pointer. */
1878 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1879 tmp.num_counters, compat_ptr(tmp.counters));
1881 goto free_newinfo_untrans;
1884 free_newinfo_untrans:
/* Translation succeeded but install failed: drop the module refs the
 * translated entries hold before freeing the table. */
1885 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1887 xt_free_table_info(newinfo);
/*
 * setsockopt() entry point for 32-bit callers.  Requires
 * CAP_NET_ADMIN; dispatches REPLACE to the compat translator and
 * ADD_COUNTERS to do_add_counters() with compat=1.
 */
1892 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1897 if (!capable(CAP_NET_ADMIN))
1901 case IP6T_SO_SET_REPLACE:
1902 ret = compat_do_replace(sock_net(sk), user, len);
1905 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 1 = "user data is in 32-bit compat layout". */
1906 ret = do_add_counters(sock_net(sk), user, len, 1);
1910 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name
 * in, rule blob out.  NOTE(review): the size field is elided in this
 * excerpt (source numbering jumps 1918->1920).
 */
1917 struct compat_ip6t_get_entries {
1918 char name[IP6T_TABLE_MAXNAMELEN];
1920 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a table's rules to a 32-bit caller: snapshot the counters,
 * then convert each entry with compat_copy_entry_to_user().
 */
1924 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1925 void __user *userptr)
1927 struct xt_counters *counters;
1928 const struct xt_table_info *private = table->private;
1932 const void *loc_cpu_entry;
1935 counters = alloc_counters(table);
1936 if (IS_ERR(counters))
1937 return PTR_ERR(counters);
1939 /* choose the copy that is on our node/cpu, ...
1940 * This choice is lazy (because current thread is
1941 * allowed to migrate to another cpu)
1943 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1946 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1947 compat_copy_entry_to_user,
1948 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers.  Validates that the
 * caller-supplied length matches header + advertised blob size, looks
 * the table up under the compat lock, and only dumps when the
 * caller's expected compat size agrees with compat_table_info() —
 * otherwise userland would mis-parse the blob.
 */
1955 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1959 struct compat_ip6t_get_entries get;
1962 if (*len < sizeof(get)) {
1963 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1967 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1970 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1971 duprintf("compat_get_entries: %u != %zu\n",
1972 *len, sizeof(get) + get.size);
1976 xt_compat_lock(AF_INET6);
1977 t = xt_find_table_lock(net, AF_INET6, get.name);
1978 if (t && !IS_ERR(t)) {
1979 const struct xt_table_info *private = t->private;
1980 struct xt_table_info info;
1981 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the 32-bit-layout size of the table. */
1982 ret = compat_table_info(private, &info);
1983 if (!ret && get.size == info.size) {
1984 ret = compat_copy_entries_to_user(private->size,
1985 t, uptr->entrytable);
1987 duprintf("compat_get_entries: I've got %u not %u!\n",
1988 private->size, get.size);
1991 xt_compat_flush_offsets(AF_INET6);
1995 ret = t ? PTR_ERR(t) : -ENOENT;
1997 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat getter falls through to the native
 * handler (defined below) for commands with no layout difference. */
2001 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * getsockopt() entry point for 32-bit callers.  Requires
 * CAP_NET_ADMIN; INFO and ENTRIES need compat-aware handling, all
 * other commands are delegated to do_ip6t_get_ctl().
 */
2004 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2008 if (!capable(CAP_NET_ADMIN))
2012 case IP6T_SO_GET_INFO:
2013 ret = get_info(sock_net(sk), user, len, 1);
2015 case IP6T_SO_GET_ENTRIES:
2016 ret = compat_get_entries(sock_net(sk), user, len);
2019 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt() entry point.  Requires CAP_NET_ADMIN; REPLACE
 * installs a new ruleset, ADD_COUNTERS adds to an existing table's
 * counters (compat=0: native layout).
 */
2026 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2030 if (!capable(CAP_NET_ADMIN))
2034 case IP6T_SO_SET_REPLACE:
2035 ret = do_replace(sock_net(sk), user, len);
2038 case IP6T_SO_SET_ADD_COUNTERS:
2039 ret = do_add_counters(sock_net(sk), user, len, 0);
2043 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt() entry point.  Requires CAP_NET_ADMIN.  Handles
 * table info/entries dumps and match/target revision queries (the
 * latter auto-load "ip6t_<name>" modules if needed).
 */
2051 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2055 if (!capable(CAP_NET_ADMIN))
2059 case IP6T_SO_GET_INFO:
2060 ret = get_info(sock_net(sk), user, len, 0);
2063 case IP6T_SO_GET_ENTRIES:
2064 ret = get_entries(sock_net(sk), user, len);
2067 case IP6T_SO_GET_REVISION_MATCH:
2068 case IP6T_SO_GET_REVISION_TARGET: {
2069 struct ip6t_get_revision rev;
2072 if (*len != sizeof(rev)) {
2076 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Same struct serves both queries; only the lookup kind differs. */
2081 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2086 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2089 "ip6t_%s", rev.name);
2094 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table with its initial (built-in) ruleset.
 * Copies repl->entries into a fresh per-CPU xt_table_info, validates
 * it with translate_table(), then hands it to xt_register_table().
 * Returns the registered table or ERR_PTR() on failure.
 */
2101 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2102 const struct ip6t_replace *repl)
2105 struct xt_table_info *newinfo;
/* Placeholder initial info; xt_register_table swaps in newinfo. */
2106 struct xt_table_info bootstrap
2107 = { 0, 0, 0, { 0 }, { 0 }, { } };
2108 void *loc_cpu_entry;
2109 struct xt_table *new_table;
2111 newinfo = xt_alloc_table_info(repl->size);
2117 /* choose the copy on our node/cpu, but dont care about preemption */
2118 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2119 memcpy(loc_cpu_entry, repl->entries, repl->size);
2121 ret = translate_table(table->name, table->valid_hooks,
2122 newinfo, loc_cpu_entry, repl->size,
2129 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2130 if (IS_ERR(new_table)) {
2131 ret = PTR_ERR(new_table);
2137 xt_free_table_info(newinfo);
2139 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister it from x_tables, run
 * cleanup_entry on every rule (drops match/target module refs), and
 * free the per-CPU rule storage.  The owning module's refcount is
 * dropped only if user rules were added beyond the built-in ones.
 */
2142 void ip6t_unregister_table(struct xt_table *table)
2144 struct xt_table_info *private;
2145 void *loc_cpu_entry;
/* Grab the owner before the table structure is torn down. */
2146 struct module *table_owner = table->me;
2148 private = xt_unregister_table(table);
2150 /* Decrease module usage counts and free resources */
2151 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2152 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2153 if (private->number > private->initial_entries)
2154 module_put(table_owner);
2155 xt_free_table_info(private);
2158 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* NOTE(review): the trailing 'invert' parameter and the XOR with the
 * range test are elided in this excerpt (numbering jumps 2161->2164). */
2160 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2161 u_int8_t type, u_int8_t code,
2164 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match callback for the built-in "icmp6" match: compares the ICMPv6
 * type/code of the packet against the configured range.  Non-first
 * fragments never match (no transport header to inspect); a packet
 * too short to carry an ICMPv6 header is hot-dropped as malformed.
 */
2169 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2171 const struct icmp6hdr *ic;
2172 struct icmp6hdr _icmph;
2173 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2175 /* Must not be a fragment. */
2176 if (par->fragoff != 0)
/* Linearize-safe header fetch; _icmph is the on-stack fallback. */
2179 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2181 /* We've been asked to examine this packet, and we
2182 * can't. Hence, no choice but to drop.
2184 duprintf("Dropping evil ICMP tinygram.\n");
2185 *par->hotdrop = true;
2189 return icmp6_type_code_match(icmpinfo->type,
2192 ic->icmp6_type, ic->icmp6_code,
2193 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2196 /* Called when user tries to insert an entry of this type. */
/* Returns true iff the rule's invflags contain only the known
 * IP6T_ICMP_INV bit — rejects rules from newer/incompatible tools. */
2197 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2199 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2201 /* Must specify no unknown invflags */
2202 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2205 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: its targetsize is just the verdict int;
 * compat hooks translate the 32-/64-bit int representations. */
2206 static struct xt_target ip6t_standard_target __read_mostly = {
2207 .name = IP6T_STANDARD_TARGET,
2208 .targetsize = sizeof(int),
2210 #ifdef CONFIG_COMPAT
2211 .compatsize = sizeof(compat_int_t),
2212 .compat_from_user = compat_standard_from_user,
2213 .compat_to_user = compat_standard_to_user,
/* ERROR target: placed at chain ends by userland; hitting it at
 * runtime invokes ip6t_error (defined earlier in this file). */
2217 static struct xt_target ip6t_error_target __read_mostly = {
2218 .name = IP6T_ERROR_TARGET,
2219 .target = ip6t_error,
2220 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* sockopt registration: routes IP6T_SO_SET_*/IP6T_SO_GET_* ranges to
 * the handlers above; compat_* variants serve 32-bit userland. */
2224 static struct nf_sockopt_ops ip6t_sockopts = {
2226 .set_optmin = IP6T_BASE_CTL,
2227 .set_optmax = IP6T_SO_SET_MAX+1,
2228 .set = do_ip6t_set_ctl,
2229 #ifdef CONFIG_COMPAT
2230 .compat_set = compat_do_ip6t_set_ctl,
2232 .get_optmin = IP6T_BASE_CTL,
2233 .get_optmax = IP6T_SO_GET_MAX+1,
2234 .get = do_ip6t_get_ctl,
2235 #ifdef CONFIG_COMPAT
2236 .compat_get = compat_do_ip6t_get_ctl,
2238 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration, wired to icmp6_match and
 * icmp6_checkentry above; restricted to IPPROTO_ICMPV6 packets. */
2241 static struct xt_match icmp6_matchstruct __read_mostly = {
2243 .match = icmp6_match,
2244 .matchsize = sizeof(struct ip6t_icmp),
2245 .checkentry = icmp6_checkentry,
2246 .proto = IPPROTO_ICMPV6,
/* Per-netns setup: create the AF_INET6 x_tables state (e.g. procfs). */
2250 static int __net_init ip6_tables_net_init(struct net *net)
2252 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: mirror of ip6_tables_net_init(). */
2255 static void __net_exit ip6_tables_net_exit(struct net *net)
2257 xt_proto_fini(net, AF_INET6);
/* Hook the per-netns init/exit pair into the pernet subsystem. */
2260 static struct pernet_operations ip6_tables_net_ops = {
2261 .init = ip6_tables_net_init,
2262 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the two built-in targets, the
 * icmp6 match, and finally the sockopt interface.  The trailing
 * xt_unregister_*/unregister_* lines are the error-unwind ladder,
 * executed in reverse registration order.
 * NOTE(review): the goto/err-label lines are elided in this excerpt.
 */
2265 static int __init ip6_tables_init(void)
2269 ret = register_pernet_subsys(&ip6_tables_net_ops);
2273 /* Noone else will be downing sem now, so we won't sleep */
2274 ret = xt_register_target(&ip6t_standard_target);
2277 ret = xt_register_target(&ip6t_error_target);
2280 ret = xt_register_match(&icmp6_matchstruct);
2284 /* Register setsockopt */
2285 ret = nf_register_sockopt(&ip6t_sockopts);
2289 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2293 xt_unregister_match(&icmp6_matchstruct);
2295 xt_unregister_target(&ip6t_error_target);
2297 xt_unregister_target(&ip6t_standard_target);
2299 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse of ip6_tables_init(). */
2304 static void __exit ip6_tables_fini(void)
2306 nf_unregister_sockopt(&ip6t_sockopts);
2308 xt_unregister_match(&icmp6_matchstruct);
2309 xt_unregister_target(&ip6t_error_target);
2310 xt_unregister_target(&ip6t_standard_target);
2312 unregister_pernet_subsys(&ip6_tables_net_ops);
2316 * find the offset to specified header or the protocol number of last header
2317 * if target < 0. "last header" is transport protocol header, ESP, or
2320 * If target header is found, its offset is set in *offset and return protocol
2321 * number. Otherwise, return -1.
2323 * If the first fragment doesn't contain the final protocol header or
2324 * NEXTHDR_NONE it is considered invalid.
2326 * Note that non-1st fragment is special case that "the protocol number
2327 * of last header" is "next header" field in Fragment header. In this case,
2328 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* Walks the IPv6 extension-header chain starting right after the
 * fixed IPv6 header.  NOTE(review): several body lines (error
 * returns, the fragment-offset handling, the final *offset store)
 * are elided in this excerpt. */
2332 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2333 int target, unsigned short *fragoff)
2335 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2336 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2337 unsigned int len = skb->len - start;
2342 while (nexthdr != target) {
2343 struct ipv6_opt_hdr _hdr, *hp;
2344 unsigned int hdrlen;
/* Chain ends at the first non-extension header or NEXTHDR_NONE. */
2346 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2352 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2355 if (nexthdr == NEXTHDR_FRAGMENT) {
2356 unsigned short _frag_off;
2358 fp = skb_header_pointer(skb,
2359 start+offsetof(struct frag_hdr,
/* Low 3 bits of the fragment field are flags, not the offset. */
2366 _frag_off = ntohs(*fp) & ~0x7;
2369 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2370 hp->nexthdr == NEXTHDR_NONE)) {
2372 *fragoff = _frag_off;
/* AH's hdrlen counts 32-bit words; other ext headers use optlen. */
2378 } else if (nexthdr == NEXTHDR_AUTH)
2379 hdrlen = (hp->hdrlen + 2) << 2;
2381 hdrlen = ipv6_optlen(hp);
2383 nexthdr = hp->nexthdr;
/* Public API exported to other modules (e.g. ip6table_filter) and the
 * module entry/exit registration. */
2392 EXPORT_SYMBOL(ip6t_register_table);
2393 EXPORT_SYMBOL(ip6t_unregister_table);
2394 EXPORT_SYMBOL(ip6t_do_table);
2395 EXPORT_SYMBOL(ip6t_ext_hdr);
2396 EXPORT_SYMBOL(ipv6_find_hdr);
2398 module_init(ip6_tables_init);
2399 module_exit(ip6_tables_fini);