2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * Compare one packet against the IPv6 portion of a rule: source and
 * destination addresses (masked), in/out interface names, and the
 * upper-layer protocol.  Each test can be inverted via the rule's
 * invflags, folded in by the FWINV() helper below.
 * NOTE(review): several lines of this function (returns, closing
 * braces, some parameters) are missing from this extracted view;
 * comments annotate only what is visible.
 */
ip6_packet_match(const struct sk_buff *skb,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the inversion bit from invflags. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison, each invertible. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/* NOTE(review): the dprintf()s below were commented out upstream; the
 * closing marker survives but the opening one was lost — restored here. */
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/

	/* Incoming interface name comparison (mask allows prefix match). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");

	/* Outgoing interface name comparison. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		unsigned short _frag_off;

		/* Walk the extension-header chain; yields the upper-layer
		 * protocol number and the fragment offset. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);

			*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
/* should be ip6 safe */
/* Validate the user-supplied ip6t_ip6 rule header: reject any flag or
 * inversion-flag bits outside the known masks.  NOTE(review): the
 * return statements are missing from this extracted view. */
ip6_checkentry(const struct ip6t_ip6 *ipv6)
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
/* Handler for the built-in ERROR target: logs the message string that
 * the rule carries in its targinfo.  NOTE(review): return type and
 * return value are missing from this extracted view. */
ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
	printk("ip6_tables: error: `%s'\n",
	       (const char *)par->targinfo);
/* Performance critical - called for every packet */
/* Run a single match extension against skb: fill par->match and
 * par->matchinfo from the rule's match entry, then dispatch to the
 * extension's ->match() hook. */
do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
	par->match = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset from the start of a table blob into an
 * ip6t_entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
	return (struct ip6t_entry *)(base + offset);
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
/* Byte-scan the ip6t_ip6 header: true only when every byte is zero,
 * i.e. the rule places no condition on the packet. */
unconditional(const struct ip6t_ip6 *ipv6)
	for (i = 0; i < sizeof(*ipv6); i++)
		if (((char *)ipv6)[i])

	return (i == sizeof(*ipv6));
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
265 .logflags = NF_LOG_MASK,
/* Mildly perf critical (only if packet tracing is on) */
/* Iterator callback for trace_packet(): walks entries from the hook's
 * start toward the matched entry 'e', tracking the chain name (set at
 * each ERROR-target chain head) and the rule number for the TRACE log
 * line.  NOTE(review): early returns are missing from this view. */
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
	if (s->target_offset == sizeof(struct ip6t_entry)
	    && strcmp(t->target.u.kernel.target->name,
		      IP6T_STANDARD_TARGET) == 0
	    && unconditional(&s->ipv6)) {
		/* Tail of chains: STANDARD target (return/policy) */
		/* "policy" when still in the built-in hook chain itself,
		 * "return" when inside a user-defined chain. */
		*comment = *chainname == hookname
			? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
			: (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet via nf_log_packet().  NOTE(review): the 'hook' parameter and
 * the 'table_base' declaration are missing from this extracted view. */
static void trace_packet(struct sk_buff *skb,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
	const struct ip6t_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* This CPU's private copy of the table. */
	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	/* Defaults: built-in chain name and the generic "rule" comment;
	 * get_chainname_rulenum() refines these while iterating. */
	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core traversal: walk this CPU's copy of the table starting at the
 * hook's entry point, match each rule and act on its target until a
 * final verdict is produced.  Standard targets are interpreted inline
 * (verdicts, jumps, RETURN via the 'back' pointer); extension targets
 * are dispatched through their ->target() hook.
 * NOTE(review): many lines (the 'hook' parameter, the main loop
 * construct, several statements and closing braces) are missing from
 * this extracted view; comments annotate only visible code.
 */
ip6t_do_table(struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * ... (tail of this comment is missing in this view) */
	/* Shared per-packet parameter blocks for match/target hooks. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in = tgpar.in = in;
	mtpar.out = tgpar.out = out;
	mtpar.family = tgpar.family = NFPROTO_IPV6;
	tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Table contents are read under RCU; replace paths swap in a
	 * fresh xt_table_info. */
	private = rcu_dereference(table->private);
	table_base = rcu_dereference(private->entries[smp_processor_id()]);

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
			     &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
		struct ip6t_entry_target *t;

		if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)

		/* Account one packet plus its full IPv6 length to this rule. */
		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			v = ((struct ip6t_standard_target *)t)->verdict;

			/* Pop from stack? */
			if (v != IP6T_RETURN) {
				/* Negative verdicts encode NF_* as -v - 1. */
				verdict = (unsigned)(-v) - 1;
				back = get_entry(table_base,
			if (table_base + v != (void *)e + e->next_offset
			    && !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ip6t_entry *next
					= (void *)e + e->next_offset;
					= (void *)back - table_base;
				/* set back pointer to next entry */

			e = get_entry(table_base, v);
			/* Targets which reenter must return
			 * ... (rest of comment missing in this view) */
			tgpar.target = t->u.kernel.target;
			tgpar.targinfo = t->data;
#ifdef CONFIG_NETFILTER_DEBUG
			((struct ip6t_entry *)table_base)->comefrom
			verdict = t->u.kernel.target->target(skb,
#ifdef CONFIG_NETFILTER_DEBUG
			/* Debug check: a target must not re-enter this table
			 * and still report CONTINUE. */
			if (((struct ip6t_entry *)table_base)->comefrom
			    && verdict == IP6T_CONTINUE) {
				printk("Target %s reentered!\n",
				       t->u.kernel.target->name);
			((struct ip6t_entry *)table_base)->comefrom
			if (verdict == IP6T_CONTINUE)
				e = (void *)e + e->next_offset;
		e = (void *)e + e->next_offset;
#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
	rcu_read_unlock_bh();

#ifdef DEBUG_ALLOW_ALL
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
494 if (!(valid_hooks & (1 << hook)))
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 printk("iptables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
523 duprintf("mark_source_chains: bad "
524 "negative verdict (%i)\n",
529 /* Return: backtrack through the last
532 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
533 #ifdef DEBUG_IP_FIREWALL_USER
535 & (1 << NF_INET_NUMHOOKS)) {
536 duprintf("Back unset "
543 pos = e->counters.pcnt;
544 e->counters.pcnt = 0;
546 /* We're at the start. */
550 e = (struct ip6t_entry *)
552 } while (oldpos == pos + e->next_offset);
555 size = e->next_offset;
556 e = (struct ip6t_entry *)
557 (entry0 + pos + size);
558 e->counters.pcnt = pos;
561 int newpos = t->verdict;
563 if (strcmp(t->target.u.user.name,
564 IP6T_STANDARD_TARGET) == 0
566 if (newpos > newinfo->size -
567 sizeof(struct ip6t_entry)) {
568 duprintf("mark_source_chains: "
569 "bad verdict (%i)\n",
573 /* This a jump; chase it. */
574 duprintf("Jump rule %u -> %u\n",
577 /* ... this is a fallthru */
578 newpos = pos + e->next_offset;
580 e = (struct ip6t_entry *)
582 e->counters.pcnt = pos;
587 duprintf("Finished chain %u\n", hook);
/* Iterator callback: release one match extension — invoke its destroy
 * hook (if present) and drop its module reference.  A non-NULL 'i'
 * bounds the cleanup to the first *i matches. */
cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)

	par.match = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family = NFPROTO_IPV6;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
/* Basic layout sanity checks on one rule: valid ip6t_ip6 header and a
 * target that fits between target_offset and next_offset.
 * NOTE(review): return statements are missing from this view. */
check_entry(struct ip6t_entry *e, const char *name)
	struct ip6t_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	t = ip6t_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
/* Validate one (already looked-up) match extension through the
 * xtables core, passing the rule's protocol and proto-inversion
 * flag to xt_check_match(). */
static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
	const struct ip6t_ip6 *ipv6 = par->entryinfo;

	par->match = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
		duprintf("ip_tables: check failed for `%s'.\n",
651 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
654 struct xt_match *match;
657 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
659 "ip6t_%s", m->u.user.name);
660 if (IS_ERR(match) || !match) {
661 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
662 return match ? PTR_ERR(match) : -ENOENT;
664 m->u.kernel.match = match;
666 ret = check_match(m, par, i);
672 module_put(m->u.kernel.match->me);
/* Validate the rule's (already looked-up) target through the xtables
 * core via xt_check_target(), using the rule's hook mask, protocol
 * and proto-inversion flag. */
static int check_target(struct ip6t_entry *e, const char *name)
	struct ip6t_entry_target *t = ip6t_get_target(e);
	struct xt_tgchk_param par = {
		.target = t->u.kernel.target,
		.hook_mask = e->comefrom,
		.family = NFPROTO_IPV6,

	t = ip6t_get_target(e);
	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
708 struct xt_mtchk_param mtpar;
710 ret = check_entry(e, name);
716 mtpar.entryinfo = &e->ipv6;
717 mtpar.hook_mask = e->comefrom;
718 mtpar.family = NFPROTO_IPV6;
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
721 goto cleanup_matches;
723 t = ip6t_get_target(e);
724 target = try_then_request_module(xt_find_target(AF_INET6,
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
733 t->u.kernel.target = target;
735 ret = check_target(e, name);
742 module_put(t->u.kernel.target->me);
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
/* Release everything one rule holds: all of its match extensions
 * (via cleanup_match), then the target's destroy hook and module
 * reference.  A non-NULL 'i' bounds how many entries are cleaned. */
cleanup_entry(struct ip6t_entry *e, unsigned int *i)
	struct xt_tgdtor_param par;
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)

	/* Cleanup all matches */
	IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ip6t_get_target(e);

	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
813 /* Checks and translates the user-supplied table segment (held in
816 translate_table(const char *name,
817 unsigned int valid_hooks,
818 struct xt_table_info *newinfo,
822 const unsigned int *hook_entries,
823 const unsigned int *underflows)
828 newinfo->size = size;
829 newinfo->number = number;
831 /* Init all hooks to impossible value. */
832 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
833 newinfo->hook_entry[i] = 0xFFFFFFFF;
834 newinfo->underflow[i] = 0xFFFFFFFF;
837 duprintf("translate_table: size %u\n", newinfo->size);
839 /* Walk through entries, checking offsets. */
840 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
841 check_entry_size_and_hooks,
845 hook_entries, underflows, &i);
850 duprintf("translate_table: %u not %u entries\n",
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(valid_hooks & (1 << i)))
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
872 if (!mark_source_chains(newinfo, valid_hooks, entry0))
875 /* Finally, each sanity check must pass */
877 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
878 find_check_entry, name, size, &i);
881 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
919 get_counters(const struct xt_table_info *t,
920 struct xt_counters counters[])
926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters
928 * with data used by 'current' CPU
929 * We dont care about preemption here.
931 curcpu = raw_smp_processor_id();
934 IP6T_ENTRY_ITERATE(t->entries[curcpu],
936 set_entry_to_counter,
940 for_each_possible_cpu(cpu) {
944 IP6T_ENTRY_ITERATE(t->entries[cpu],
946 add_entry_to_counter,
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
/* Iterator callback: fold addme[*i]'s byte/packet counts into this
 * entry's counters.  NOTE(review): the index increment and return are
 * missing from this extracted view. */
add_counter_to_entry(struct ip6t_entry *e,
		     const struct xt_counters addme[],
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
965 /* Take values from counters and add them back onto the current cpu */
966 static void put_counters(struct xt_table_info *t,
967 const struct xt_counters counters[])
972 cpu = smp_processor_id();
974 IP6T_ENTRY_ITERATE(t->entries[cpu],
976 add_counter_to_entry,
/* Iterator callback: reset one entry's byte and packet counters. */
zero_entry_counter(struct ip6t_entry *e, void *arg)
	e->counters.bcnt = 0;
	e->counters.pcnt = 0;
991 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
994 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
996 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
997 for_each_possible_cpu(cpu) {
998 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
1004 static struct xt_counters *alloc_counters(struct xt_table *table)
1006 unsigned int countersize;
1007 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1011 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care
1014 countersize = sizeof(struct xt_counters) * private->number;
1015 counters = vmalloc_node(countersize, numa_node_id());
1017 if (counters == NULL)
1020 info = xt_alloc_table_info(private->size);
1024 clone_counters(info, private);
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1034 xt_free_table_info(info);
1039 return ERR_PTR(-ENOMEM);
1043 copy_entries_to_user(unsigned int total_size,
1044 struct xt_table *table,
1045 void __user *userptr)
1047 unsigned int off, num;
1048 struct ip6t_entry *e;
1049 struct xt_counters *counters;
1050 const struct xt_table_info *private = table->private;
1052 const void *loc_cpu_entry;
1054 counters = alloc_counters(table);
1055 if (IS_ERR(counters))
1056 return PTR_ERR(counters);
1058 /* choose the copy that is on our node/cpu, ...
1059 * This choice is lazy (because current thread is
1060 * allowed to migrate to another cpu)
1062 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1063 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1068 /* FIXME: use iterator macros --RR */
1069 /* ... then go back and fix counters and names */
1070 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1072 const struct ip6t_entry_match *m;
1073 const struct ip6t_entry_target *t;
1075 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1076 if (copy_to_user(userptr + off
1077 + offsetof(struct ip6t_entry, counters),
1079 sizeof(counters[num])) != 0) {
1084 for (i = sizeof(struct ip6t_entry);
1085 i < e->target_offset;
1086 i += m->u.match_size) {
1089 if (copy_to_user(userptr + off + i
1090 + offsetof(struct ip6t_entry_match,
1092 m->u.kernel.match->name,
1093 strlen(m->u.kernel.match->name)+1)
1100 t = ip6t_get_target(e);
1101 if (copy_to_user(userptr + off + e->target_offset
1102 + offsetof(struct ip6t_entry_target,
1104 t->u.kernel.target->name,
1105 strlen(t->u.kernel.target->name)+1) != 0) {
#ifdef CONFIG_COMPAT
/* Convert a 32-bit userspace standard verdict into the native form,
 * adjusting the value by the accumulated compat jump offset. */
static void compat_standard_from_user(void *dst, void *src)
	int v = *(compat_int_t *)src;

	v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
/* Inverse of compat_standard_from_user(): translate a native standard
 * verdict back to the 32-bit layout and copy it to userspace. */
static int compat_standard_to_user(void __user *dst, void *src)
	compat_int_t cv = *(int *)src;

	cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Iterator callback: accumulate the native-vs-compat size delta for
 * one match extension into *size. */
compat_calc_match(struct ip6t_entry_match *m, int *size)
	*size += xt_compat_match_offset(m->u.kernel.match);
1142 static int compat_calc_entry(struct ip6t_entry *e,
1143 const struct xt_table_info *info,
1144 void *base, struct xt_table_info *newinfo)
1146 struct ip6t_entry_target *t;
1147 unsigned int entry_offset;
1150 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1151 entry_offset = (void *)e - base;
1152 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1153 t = ip6t_get_target(e);
1154 off += xt_compat_target_offset(t->u.kernel.target);
1155 newinfo->size -= off;
1156 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1160 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1161 if (info->hook_entry[i] &&
1162 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1163 newinfo->hook_entry[i] -= off;
1164 if (info->underflow[i] &&
1165 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1166 newinfo->underflow[i] -= off;
1171 static int compat_table_info(const struct xt_table_info *info,
1172 struct xt_table_info *newinfo)
1174 void *loc_cpu_entry;
1176 if (!newinfo || !info)
1179 /* we dont care about newinfo->entries[] */
1180 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1181 newinfo->initial_entries = 0;
1182 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1183 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1184 compat_calc_entry, info, loc_cpu_entry,
1189 static int get_info(struct net *net, void __user *user, int *len, int compat)
1191 char name[IP6T_TABLE_MAXNAMELEN];
1195 if (*len != sizeof(struct ip6t_getinfo)) {
1196 duprintf("length %u != %zu\n", *len,
1197 sizeof(struct ip6t_getinfo));
1201 if (copy_from_user(name, user, sizeof(name)) != 0)
1204 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1205 #ifdef CONFIG_COMPAT
1207 xt_compat_lock(AF_INET6);
1209 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1210 "ip6table_%s", name);
1211 if (t && !IS_ERR(t)) {
1212 struct ip6t_getinfo info;
1213 const struct xt_table_info *private = t->private;
1215 #ifdef CONFIG_COMPAT
1217 struct xt_table_info tmp;
1218 ret = compat_table_info(private, &tmp);
1219 xt_compat_flush_offsets(AF_INET6);
1223 info.valid_hooks = t->valid_hooks;
1224 memcpy(info.hook_entry, private->hook_entry,
1225 sizeof(info.hook_entry));
1226 memcpy(info.underflow, private->underflow,
1227 sizeof(info.underflow));
1228 info.num_entries = private->number;
1229 info.size = private->size;
1230 strcpy(info.name, name);
1232 if (copy_to_user(user, &info, *len) != 0)
1240 ret = t ? PTR_ERR(t) : -ENOENT;
1241 #ifdef CONFIG_COMPAT
1243 xt_compat_unlock(AF_INET6);
1249 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1252 struct ip6t_get_entries get;
1255 if (*len < sizeof(get)) {
1256 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1259 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1261 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1262 duprintf("get_entries: %u != %zu\n",
1263 *len, sizeof(get) + get.size);
1267 t = xt_find_table_lock(net, AF_INET6, get.name);
1268 if (t && !IS_ERR(t)) {
1269 struct xt_table_info *private = t->private;
1270 duprintf("t->private->number = %u\n", private->number);
1271 if (get.size == private->size)
1272 ret = copy_entries_to_user(private->size,
1273 t, uptr->entrytable);
1275 duprintf("get_entries: I've got %u not %u!\n",
1276 private->size, get.size);
1282 ret = t ? PTR_ERR(t) : -ENOENT;
1288 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1289 struct xt_table_info *newinfo, unsigned int num_counters,
1290 void __user *counters_ptr)
1294 struct xt_table_info *oldinfo;
1295 struct xt_counters *counters;
1296 const void *loc_cpu_old_entry;
1299 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1306 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1307 "ip6table_%s", name);
1308 if (!t || IS_ERR(t)) {
1309 ret = t ? PTR_ERR(t) : -ENOENT;
1310 goto free_newinfo_counters_untrans;
1314 if (valid_hooks != t->valid_hooks) {
1315 duprintf("Valid hook crap: %08X vs %08X\n",
1316 valid_hooks, t->valid_hooks);
1321 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1325 /* Update module usage count based on number of rules */
1326 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1327 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1328 if ((oldinfo->number > oldinfo->initial_entries) ||
1329 (newinfo->number <= oldinfo->initial_entries))
1331 if ((oldinfo->number > oldinfo->initial_entries) &&
1332 (newinfo->number <= oldinfo->initial_entries))
1335 /* Get the old counters. */
1336 get_counters(oldinfo, counters);
1337 /* Decrease module usage counts and free resource */
1338 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1339 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1341 xt_free_table_info(oldinfo);
1342 if (copy_to_user(counters_ptr, counters,
1343 sizeof(struct xt_counters) * num_counters) != 0)
1352 free_newinfo_counters_untrans:
1359 do_replace(struct net *net, void __user *user, unsigned int len)
1362 struct ip6t_replace tmp;
1363 struct xt_table_info *newinfo;
1364 void *loc_cpu_entry;
1366 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1369 /* overflow check */
1370 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1373 newinfo = xt_alloc_table_info(tmp.size);
1377 /* choose the copy that is on our node/cpu */
1378 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1379 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1385 ret = translate_table(tmp.name, tmp.valid_hooks,
1386 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1387 tmp.hook_entry, tmp.underflow);
1391 duprintf("ip_tables: Translated table\n");
1393 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1394 tmp.num_counters, tmp.counters);
1396 goto free_newinfo_untrans;
1399 free_newinfo_untrans:
1400 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1402 xt_free_table_info(newinfo);
1407 do_add_counters(struct net *net, void __user *user, unsigned int len,
1411 struct xt_counters_info tmp;
1412 struct xt_counters *paddc;
1413 unsigned int num_counters;
1418 const struct xt_table_info *private;
1420 const void *loc_cpu_entry;
1421 #ifdef CONFIG_COMPAT
1422 struct compat_xt_counters_info compat_tmp;
1426 size = sizeof(struct compat_xt_counters_info);
1431 size = sizeof(struct xt_counters_info);
1434 if (copy_from_user(ptmp, user, size) != 0)
1437 #ifdef CONFIG_COMPAT
1439 num_counters = compat_tmp.num_counters;
1440 name = compat_tmp.name;
1444 num_counters = tmp.num_counters;
1448 if (len != size + num_counters * sizeof(struct xt_counters))
1451 paddc = vmalloc_node(len - size, numa_node_id());
1455 if (copy_from_user(paddc, user + size, len - size) != 0) {
1460 t = xt_find_table_lock(net, AF_INET6, name);
1461 if (!t || IS_ERR(t)) {
1462 ret = t ? PTR_ERR(t) : -ENOENT;
1466 mutex_lock(&t->lock);
1467 private = t->private;
1468 if (private->number != num_counters) {
1470 goto unlock_up_free;
1475 /* Choose the copy that is on our node */
1476 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1477 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1479 add_counter_to_entry,
1484 mutex_unlock(&t->lock);
1493 #ifdef CONFIG_COMPAT
1494 struct compat_ip6t_replace {
1495 char name[IP6T_TABLE_MAXNAMELEN];
1499 u32 hook_entry[NF_INET_NUMHOOKS];
1500 u32 underflow[NF_INET_NUMHOOKS];
1502 compat_uptr_t counters; /* struct ip6t_counters * */
1503 struct compat_ip6t_entry entries[0];
/*
 * Convert one kernel struct ip6t_entry into 32-bit compat layout in
 * userspace at *dstptr: copy the entry header and its counter slot
 * counters[*i], then each match and the target via the xt compat
 * helpers.  target_offset/next_offset are adjusted by the accumulated
 * layout shrinkage (origsize - *size) before being stored back.
 * (Sampled excerpt: error returns elided.)
 */
1507 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1508 unsigned int *size, struct xt_counters *counters,
1511 struct ip6t_entry_target *t;
1512 struct compat_ip6t_entry __user *ce;
1513 u_int16_t target_offset, next_offset;
1514 compat_uint_t origsize;
1519 ce = (struct compat_ip6t_entry __user *)*dstptr;
/* Header first; the offsets inside it are patched below once known. */
1520 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1523 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1526 *dstptr += sizeof(struct compat_ip6t_entry);
1527 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1529 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets must reflect the smaller compat layout written so far. */
1530 target_offset = e->target_offset - (origsize - *size);
1533 t = ip6t_get_target(e);
1534 ret = xt_compat_target_to_user(t, dstptr, size);
1538 next_offset = e->next_offset - (origsize - *size);
1539 if (put_user(target_offset, &ce->target_offset))
1541 if (put_user(next_offset, &ce->next_offset))
/*
 * Resolve the xt_match for one compat rule match by name/revision,
 * loading the "ip6t_<name>" module on demand; cache the result in
 * m->u.kernel.match and add its compat size delta to *size.
 * Returns 0, PTR_ERR on lookup error, or -ENOENT if not found.
 * (Sampled excerpt: some lines elided.)
 */
1551 compat_find_calc_match(struct ip6t_entry_match *m,
1553 const struct ip6t_ip6 *ipv6,
1554 unsigned int hookmask,
1555 int *size, unsigned int *i)
1557 struct xt_match *match;
1559 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1560 m->u.user.revision),
1561 "ip6t_%s", m->u.user.name);
1562 if (IS_ERR(match) || !match) {
1563 duprintf("compat_check_calc_match: `%s' not found\n",
1565 return match ? PTR_ERR(match) : -ENOENT;
1567 m->u.kernel.match = match;
/* Account for this match's 64-bit vs 32-bit layout size difference. */
1568 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken for one match.  When used as an
 * ITERATE callback with a count, only the first *i matches are
 * released: (*i)-- == 0 ends the walk early (the elided line
 * presumably returns nonzero to stop iteration -- confirm upstream).
 */
1575 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1577 if (i && (*i)-- == 0)
1580 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match
 * (NULL count = all of them), then the target.  The optional *i limits
 * how many entries are processed when invoked via ITERATE_CONTINUE
 * after a partial failure.
 */
1585 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1587 struct ip6t_entry_target *t;
1589 if (i && (*i)-- == 0)
1592 /* Cleanup all matches */
1593 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1594 t = compat_ip6t_get_target(e);
1595 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat rule: alignment and bounds of
 * the entry, minimum next_offset, match/target lookup (taking module
 * refs), size-delta bookkeeping via xt_compat_add_offset(), and
 * recording of hook entry/underflow positions into newinfo.  On
 * failure the refs taken so far are dropped via the release labels.
 * (Sampled excerpt: braces, returns and labels elided.)
 */
1600 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1601 struct xt_table_info *newinfo,
1603 unsigned char *base,
1604 unsigned char *limit,
1605 unsigned int *hook_entries,
1606 unsigned int *underflows,
1610 struct ip6t_entry_target *t;
1611 struct xt_target *target;
1612 unsigned int entry_offset;
1616 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be aligned and leave room before the end of the blob. */
1617 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1618 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1619 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Smallest legal entry: compat header plus a bare target. */
1623 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1624 sizeof(struct compat_xt_entry_target)) {
1625 duprintf("checking: element %p size %u\n",
1630 /* For purposes of check_entry casting the compat entry is fine */
1631 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates this entry's native-vs-compat size difference. */
1635 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1636 entry_offset = (void *)e - (void *)base;
1638 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1639 &e->ipv6, e->comefrom, &off, &j);
1641 goto release_matches;
1643 t = compat_ip6t_get_target(e);
1644 target = try_then_request_module(xt_find_target(AF_INET6,
1646 t->u.user.revision),
1647 "ip6t_%s", t->u.user.name);
1648 if (IS_ERR(target) || !target) {
1649 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1651 ret = target ? PTR_ERR(target) : -ENOENT;
1652 goto release_matches;
1654 t->u.kernel.target = target;
1656 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for the second translation pass. */
1658 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1662 /* Check hooks & underflows */
1663 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1664 if ((unsigned char *)e - base == hook_entries[h])
1665 newinfo->hook_entry[h] = hook_entries[h];
1666 if ((unsigned char *)e - base == underflows[h])
1667 newinfo->underflow[h] = underflows[h];
1670 /* Clear counters and comefrom */
1671 memset(&e->counters, 0, sizeof(e->counters));
/* Error path: drop the target ref, then the j match refs taken above. */
1678 module_put(t->u.kernel.target->me);
/*
 * NOTE(review): e is a struct compat_ip6t_entry, but this uses the
 * non-compat IP6T_MATCH_ITERATE, while the success path above walks
 * the same entry with COMPAT_IP6T_MATCH_ITERATE -- verify the match
 * offsets are traversed with the correct layout here.
 */
1680 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second translation pass: expand one validated compat rule into the
 * native struct ip6t_entry layout at *dstptr, converting each match
 * and the target, then fix target_offset/next_offset and shift any
 * later hook_entry/underflow positions by the growth.  *size increases
 * here, so (origsize - *size) is the (negative) growth delta.
 * (Sampled excerpt: braces and returns elided.)
 */
1685 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1686 unsigned int *size, const char *name,
1687 struct xt_table_info *newinfo, unsigned char *base)
1689 struct ip6t_entry_target *t;
1690 struct xt_target *target;
1691 struct ip6t_entry *de;
1692 unsigned int origsize;
1697 de = (struct ip6t_entry *)*dstptr;
1698 memcpy(de, e, sizeof(struct ip6t_entry));
1699 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1701 *dstptr += sizeof(struct ip6t_entry);
1702 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1704 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1708 de->target_offset = e->target_offset - (origsize - *size);
1709 t = compat_ip6t_get_target(e);
1710 target = t->u.kernel.target;
1711 xt_compat_target_from_user(t, dstptr, size);
1713 de->next_offset = e->next_offset - (origsize - *size);
/* Every hook/underflow offset past this entry moves by the same delta. */
1714 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1715 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1716 newinfo->hook_entry[h] -= origsize - *size;
1717 if ((unsigned char *)de - base < newinfo->underflow[h])
1718 newinfo->underflow[h] -= origsize - *size;
/*
 * Final checkentry pass over a translated (now native-layout) rule:
 * run each match's checkentry via check_match with an xt_mtchk_param
 * built from this rule, then validate the target.  On failure the j
 * matches already checked are cleaned up.  (Sampled excerpt.)
 */
1723 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1728 struct xt_mtchk_param mtpar;
1732 mtpar.entryinfo = &e->ipv6;
1733 mtpar.hook_mask = e->comefrom;
1734 mtpar.family = NFPROTO_IPV6;
1735 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1737 goto cleanup_matches;
1739 ret = check_target(e, name);
1741 goto cleanup_matches;
/* Undo only the matches whose checkentry already succeeded. */
1747 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a full 32-bit ruleset into native layout:
 *  - pass 1 (under xt_compat_lock): per-entry size/hook validation and
 *    size-delta recording (check_compat_entry_size_and_hooks);
 *  - verify all valid hooks received entry/underflow positions;
 *  - allocate the native-size table, expand every entry with
 *    compat_copy_entry_from_user(), then flush the compat offsets;
 *  - mark_source_chains() loop detection and a final per-rule
 *    compat_check_entry pass;
 *  - replicate the translated blob to every other CPU's copy.
 * On success *pinfo/*pentry0 are swapped to the new table.  Error
 * paths release the module refs taken in pass 1.
 * (Sampled excerpt: labels and some error branches elided.)
 */
1752 translate_compat_table(const char *name,
1753 unsigned int valid_hooks,
1754 struct xt_table_info **pinfo,
1756 unsigned int total_size,
1757 unsigned int number,
1758 unsigned int *hook_entries,
1759 unsigned int *underflows)
1762 struct xt_table_info *newinfo, *info;
1763 void *pos, *entry0, *entry1;
1770 info->number = number;
1772 /* Init all hooks to impossible value. */
1773 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1774 info->hook_entry[i] = 0xFFFFFFFF;
1775 info->underflow[i] = 0xFFFFFFFF;
1778 duprintf("translate_compat_table: size %u\n", info->size);
1780 xt_compat_lock(AF_INET6);
1781 /* Walk through entries, checking offsets. */
1782 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1783 check_compat_entry_size_and_hooks,
1784 info, &size, entry0,
1785 entry0 + total_size,
1786 hook_entries, underflows, &j, name);
/* Rule count claimed by userspace must match what we walked. */
1792 duprintf("translate_compat_table: %u not %u entries\n",
1797 /* Check hooks all assigned */
1798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1799 /* Only hooks which are valid */
1800 if (!(valid_hooks & (1 << i)))
1802 if (info->hook_entry[i] == 0xFFFFFFFF) {
1803 duprintf("Invalid hook entry %u %u\n",
1804 i, hook_entries[i]);
1807 if (info->underflow[i] == 0xFFFFFFFF) {
1808 duprintf("Invalid underflow %u %u\n",
/* Native-layout table is larger by the accumulated size deltas. */
1815 newinfo = xt_alloc_table_info(size);
1819 newinfo->number = number;
1820 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1821 newinfo->hook_entry[i] = info->hook_entry[i];
1822 newinfo->underflow[i] = info->underflow[i];
1824 entry1 = newinfo->entries[raw_smp_processor_id()];
1827 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1828 compat_copy_entry_from_user,
1829 &pos, &size, name, newinfo, entry1);
/* Per-entry offset records are only needed during translation. */
1830 xt_compat_flush_offsets(AF_INET6);
1831 xt_compat_unlock(AF_INET6);
1836 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1840 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/*
 * Partial failure: release refs of the compat entries not yet checked
 * and clean up the i native entries that already passed.
 * NOTE(review): entry0 holds the compat blob (walked with total_size
 * above) but this continuation is bounded by newinfo->size (native
 * size) -- verify the bound against upstream.
 */
1844 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1845 compat_release_entry, &j);
1846 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1847 xt_free_table_info(newinfo);
1851 /* And one copy for every other CPU */
1852 for_each_possible_cpu(i)
1853 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1854 memcpy(newinfo->entries[i], entry1, newinfo->size);
1858 xt_free_table_info(info);
1862 xt_free_table_info(newinfo);
1864 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1867 xt_compat_flush_offsets(AF_INET6);
1868 xt_compat_unlock(AF_INET6);
/*
 * 32-bit IP6T_SO_SET_REPLACE: copy the compat replace header and rule
 * blob from userspace, translate to native layout, then hand off to
 * __do_replace() (shared with the native path) which swaps the table
 * and returns the old counters.  If __do_replace fails, the translated
 * entries are cleaned up and the table info freed.  (Sampled excerpt.)
 */
1873 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1876 struct compat_ip6t_replace tmp;
1877 struct xt_table_info *newinfo;
1878 void *loc_cpu_entry;
/*
 * NOTE(review): tmp.name comes straight from userspace and no
 * NUL-termination is visible in this excerpt -- confirm the lookup
 * path bounds the name before it is used below.
 */
1880 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1883 /* overflow check */
/* Reject sizes that would overflow the per-CPU allocation math. */
1884 if (tmp.size >= INT_MAX / num_possible_cpus())
1886 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1889 newinfo = xt_alloc_table_info(tmp.size);
1893 /* choose the copy that is on our node/cpu */
1894 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1895 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1901 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1902 &newinfo, &loc_cpu_entry, tmp.size,
1903 tmp.num_entries, tmp.hook_entry,
1908 duprintf("compat_do_replace: Translated table\n");
/* compat_ptr() widens the 32-bit user pointer to the counters buffer. */
1910 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1911 tmp.num_counters, compat_ptr(tmp.counters));
1913 goto free_newinfo_untrans;
1916 free_newinfo_untrans:
1917 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1919 xt_free_table_info(newinfo);
/*
 * compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * IP6T_SO_SET_REPLACE to the compat replace path and ADD_COUNTERS to
 * do_add_counters() with compat=1.  (Sampled excerpt: switch frame
 * and returns elided.)
 */
1924 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1929 if (!capable(CAP_NET_ADMIN))
1933 case IP6T_SO_SET_REPLACE:
1934 ret = compat_do_replace(sock_net(sk), user, len);
1937 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 1 == caller uses the 32-bit compat layout. */
1938 ret = do_add_counters(sock_net(sk), user, len, 1);
1942 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name,
 * an (elided) size field, then the rule blob as a flexible array.
 */
1949 struct compat_ip6t_get_entries {
1950 char name[IP6T_TABLE_MAXNAMELEN];
1952 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a table's rules to a 32-bit caller: snapshot the counters, then
 * walk this CPU's rule copy shrinking each entry to compat layout via
 * compat_copy_entry_to_user().  (Sampled excerpt: the counters vfree
 * and final return are elided.)
 */
1956 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1957 void __user *userptr)
1959 struct xt_counters *counters;
1960 const struct xt_table_info *private = table->private;
1964 const void *loc_cpu_entry;
1967 counters = alloc_counters(table);
1968 if (IS_ERR(counters))
1969 return PTR_ERR(counters);
1971 /* choose the copy that is on our node/cpu, ...
1972 * This choice is lazy (because current thread is
1973 * allowed to migrate to another cpu)
1975 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1978 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1979 compat_copy_entry_to_user,
1980 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES (compat): validate *len against the header plus
 * the caller-claimed size, look up the table under xt_compat_lock, and
 * copy the rules out in compat layout when the compat-translated size
 * matches what the caller allocated.  (Sampled excerpt.)
 */
1987 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1991 struct compat_ip6t_get_entries get;
1994 if (*len < sizeof(get)) {
1995 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1999 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2002 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2003 duprintf("compat_get_entries: %u != %zu\n",
2004 *len, sizeof(get) + get.size);
2008 xt_compat_lock(AF_INET6);
2009 t = xt_find_table_lock(net, AF_INET6, get.name);
2010 if (t && !IS_ERR(t)) {
2011 const struct xt_table_info *private = t->private;
2012 struct xt_table_info info;
2013 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the table's 32-bit view size in info. */
2014 ret = compat_table_info(private, &info);
2015 if (!ret && get.size == info.size) {
2016 ret = compat_copy_entries_to_user(private->size,
2017 t, uptr->entrytable);
2019 duprintf("compat_get_entries: I've got %u not %u!\n",
2020 private->size, get.size);
2023 xt_compat_flush_offsets(AF_INET6);
2027 ret = t ? PTR_ERR(t) : -ENOENT;
2029 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat GET path falls back to the native
 * handler for commands whose layouts are identical on both ABIs. */
2033 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat getsockopt dispatcher: CAP_NET_ADMIN required; INFO and
 * ENTRIES take compat-specific paths, everything else is delegated to
 * do_ip6t_get_ctl().  (Sampled excerpt.)
 */
2036 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2040 if (!capable(CAP_NET_ADMIN))
2044 case IP6T_SO_GET_INFO:
2045 ret = get_info(sock_net(sk), user, len, 1);
2047 case IP6T_SO_GET_ENTRIES:
2048 ret = compat_get_entries(sock_net(sk), user, len);
2051 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN gate, then REPLACE and
 * ADD_COUNTERS with compat=0.  (Sampled excerpt: switch frame and
 * returns elided.)
 */
2058 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2062 if (!capable(CAP_NET_ADMIN))
2066 case IP6T_SO_SET_REPLACE:
2067 ret = do_replace(sock_net(sk), user, len);
2070 case IP6T_SO_SET_ADD_COUNTERS:
2071 ret = do_add_counters(sock_net(sk), user, len, 0);
2075 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: CAP_NET_ADMIN gate; GET_INFO and
 * GET_ENTRIES dump table state, GET_REVISION_MATCH/TARGET answer
 * extension revision probes, auto-loading "ip6t_<name>" modules.
 * (Sampled excerpt.)
 */
2083 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2087 if (!capable(CAP_NET_ADMIN))
2091 case IP6T_SO_GET_INFO:
2092 ret = get_info(sock_net(sk), user, len, 0);
2095 case IP6T_SO_GET_ENTRIES:
2096 ret = get_entries(sock_net(sk), user, len);
2099 case IP6T_SO_GET_REVISION_MATCH:
2100 case IP6T_SO_GET_REVISION_TARGET: {
2101 struct ip6t_get_revision rev;
2104 if (*len != sizeof(rev)) {
2108 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* NOTE(review): rev.name is user-supplied; NUL-termination is not
 * visible in this excerpt -- confirm before its use as a module-name
 * format argument below. */
2113 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2118 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2121 "ip6t_%s", rev.name);
2126 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table: allocate an xt_table_info sized for the
 * initial ruleset, copy the template rules into this CPU's slot,
 * translate/validate them, then register with the xt core.  Returns
 * the live xt_table or ERR_PTR(); newinfo is freed on any failure.
 * (Sampled excerpt: error branches elided.)
 */
2133 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2134 const struct ip6t_replace *repl)
2137 struct xt_table_info *newinfo;
/* Zeroed bootstrap info; xt_register_table swaps newinfo in for it. */
2138 struct xt_table_info bootstrap
2139 = { 0, 0, 0, { 0 }, { 0 }, { } };
2140 void *loc_cpu_entry;
2141 struct xt_table *new_table;
2143 newinfo = xt_alloc_table_info(repl->size);
2149 /* choose the copy on our node/cpu, but dont care about preemption */
2150 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2151 memcpy(loc_cpu_entry, repl->entries, repl->size);
2153 ret = translate_table(table->name, table->valid_hooks,
2154 newinfo, loc_cpu_entry, repl->size,
2161 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2162 if (IS_ERR(new_table)) {
2163 ret = PTR_ERR(new_table);
2169 xt_free_table_info(newinfo);
2171 return ERR_PTR(ret);
/*
 * Tear down a registered table: detach it from the xt core, run every
 * rule's cleanup (dropping match/target module refs), drop the table
 * owner's extra module ref if user rules beyond the built-ins were
 * loaded, and free the table info.
 */
2174 void ip6t_unregister_table(struct xt_table *table)
2176 struct xt_table_info *private;
2177 void *loc_cpu_entry;
2178 struct module *table_owner = table->me;
2180 private = xt_unregister_table(table);
2182 /* Decrease module usage counts and free resources */
2183 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2184 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* The extra ref was held while a user-modified ruleset was active. */
2185 if (private->number > private->initial_entries)
2186 module_put(table_owner);
2187 xt_free_table_info(private);
2190 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* Range test on ICMPv6 type/code.  The elided final parameter is
 * presumably an invert flag XORed with the range test, per the header
 * comment above -- confirm against upstream. */
2192 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2193 u_int8_t type, u_int8_t code,
2196 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match callback for the built-in "icmp6" match: refuse non-first
 * fragments, pull the ICMPv6 header (setting hotdrop when it cannot
 * be read), then test type/code against the configured range with
 * optional inversion.  (Sampled excerpt.)
 */
2201 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2203 const struct icmp6hdr *ic;
2204 struct icmp6hdr _icmph;
2205 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2207 /* Must not be a fragment. */
2208 if (par->fragoff != 0)
/* Linearize-or-copy the ICMPv6 header at the transport offset. */
2211 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2213 /* We've been asked to examine this packet, and we
2214 * can't. Hence, no choice but to drop.
2216 duprintf("Dropping evil ICMP tinygram.\n");
2217 *par->hotdrop = true;
2221 return icmp6_type_code_match(icmpinfo->type,
2224 ic->icmp6_type, ic->icmp6_code,
2225 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2228 /* Called when user tries to insert an entry of this type. */
/* Accept the rule iff no invflag bits beyond IP6T_ICMP_INV are set. */
2229 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2231 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2233 /* Must specify no unknown invflags */
2234 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2237 /* The built-in targets: standard (NULL) and error. */
/*
 * Standard target: the verdict is a plain int (targetsize); no .target
 * function is set here.  The compat hooks translate the 32-bit verdict
 * representation on the CONFIG_COMPAT paths.
 */
2238 static struct xt_target ip6t_standard_target __read_mostly = {
2239 .name = IP6T_STANDARD_TARGET,
2240 .targetsize = sizeof(int),
2242 #ifdef CONFIG_COMPAT
2243 .compatsize = sizeof(compat_int_t),
2244 .compat_from_user = compat_standard_from_user,
2245 .compat_to_user = compat_standard_to_user,
/* ERROR target for broken rulesets; handled by ip6t_error(), which is
 * defined elsewhere in this file (outside this excerpt). */
2249 static struct xt_target ip6t_error_target __read_mostly = {
2250 .name = IP6T_ERROR_TARGET,
2251 .target = ip6t_error,
2252 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/*
 * Socket-option registration: wires the IP6T_SO_SET/GET command ranges
 * to the dispatchers above, with compat entry points when built with
 * CONFIG_COMPAT.
 */
2256 static struct nf_sockopt_ops ip6t_sockopts = {
2258 .set_optmin = IP6T_BASE_CTL,
2259 .set_optmax = IP6T_SO_SET_MAX+1,
2260 .set = do_ip6t_set_ctl,
2261 #ifdef CONFIG_COMPAT
2262 .compat_set = compat_do_ip6t_set_ctl,
2264 .get_optmin = IP6T_BASE_CTL,
2265 .get_optmax = IP6T_SO_GET_MAX+1,
2266 .get = do_ip6t_get_ctl,
2267 #ifdef CONFIG_COMPAT
2268 .compat_get = compat_do_ip6t_get_ctl,
2270 .owner = THIS_MODULE,
/* Built-in "icmp6" match, restricted to IPPROTO_ICMPV6 packets. */
2273 static struct xt_match icmp6_matchstruct __read_mostly = {
2275 .match = icmp6_match,
2276 .matchsize = sizeof(struct ip6t_icmp),
2277 .checkentry = icmp6_checkentry,
2278 .proto = IPPROTO_ICMPV6,
/* Per-netns init: set up the IPv6 xt proto state for this namespace. */
2282 static int __net_init ip6_tables_net_init(struct net *net)
2284 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: release the IPv6 xt proto state. */
2287 static void __net_exit ip6_tables_net_exit(struct net *net)
2289 xt_proto_fini(net, AF_INET6);
/* Network-namespace lifecycle hooks, registered in ip6_tables_init(). */
2292 static struct pernet_operations ip6_tables_net_ops = {
2293 .init = ip6_tables_net_init,
2294 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the two built-in targets, the
 * icmp6 match, and finally the sockopt interface; unwind in reverse
 * order on any failure (the error labels are elided in this excerpt).
 */
2297 static int __init ip6_tables_init(void)
2301 ret = register_pernet_subsys(&ip6_tables_net_ops);
2305 /* Noone else will be downing sem now, so we won't sleep */
2306 ret = xt_register_target(&ip6t_standard_target);
2309 ret = xt_register_target(&ip6t_error_target);
2312 ret = xt_register_match(&icmp6_matchstruct);
2316 /* Register setsockopt */
2317 ret = nf_register_sockopt(&ip6t_sockopts);
2321 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind: reverse order of the registrations above. */
2325 xt_unregister_match(&icmp6_matchstruct);
2327 xt_unregister_target(&ip6t_error_target);
2329 xt_unregister_target(&ip6t_standard_target);
2331 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: mirror of ip6_tables_init(), teardown in reverse. */
2336 static void __exit ip6_tables_fini(void)
2338 nf_unregister_sockopt(&ip6t_sockopts);
2340 xt_unregister_match(&icmp6_matchstruct);
2341 xt_unregister_target(&ip6t_error_target);
2342 xt_unregister_target(&ip6t_standard_target);
2344 unregister_pernet_subsys(&ip6_tables_net_ops);
2348  * find the offset to specified header or the protocol number of last header
2349  * if target < 0. "last header" is transport protocol header, ESP, or
2352  * If target header is found, its offset is set in *offset and return protocol
2353  * number. Otherwise, return -1.
2355  * If the first fragment doesn't contain the final protocol header or
2356  * NEXTHDR_NONE it is considered invalid.
2358  * Note that non-1st fragment is special case that "the protocol number
2359  * of last header" is "next header" field in Fragment header. In this case,
2360  * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2364 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2365 int target, unsigned short *fragoff)
2367 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr)
2368 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2369 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the wanted protocol appears. */
2374 while (nexthdr != target) {
2375 struct ipv6_opt_hdr _hdr, *hp;
2376 unsigned int hdrlen;
2378 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2384 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2387 if (nexthdr == NEXTHDR_FRAGMENT) {
2388 unsigned short _frag_off;
/* Read only the frag_off field of the fragment header. */
2390 fp = skb_header_pointer(skb,
2391 start+offsetof(struct frag_hdr,
/* Low 3 bits of frag_off are flags; mask them off the offset. */
2398 _frag_off = ntohs(*fp) & ~0x7;
2401 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2402 hp->nexthdr == NEXTHDR_NONE)) {
2404 *fragoff = _frag_off;
/* AUTH header length is encoded in 32-bit words, unlike the others. */
2410 } else if (nexthdr == NEXTHDR_AUTH)
2411 hdrlen = (hp->hdrlen + 2) << 2;
2413 hdrlen = ipv6_optlen(hp);
2415 nexthdr = hp->nexthdr;
/* Public entry points used by the per-table modules (ip6table_filter
 * and friends) and other IPv6 netfilter code. */
2424 EXPORT_SYMBOL(ip6t_register_table);
2425 EXPORT_SYMBOL(ip6t_unregister_table);
2426 EXPORT_SYMBOL(ip6t_do_table);
2427 EXPORT_SYMBOL(ip6t_ext_hdr);
2428 EXPORT_SYMBOL(ipv6_find_hdr);
2430 module_init(ip6_tables_init);
2431 module_exit(ip6_tables_fini);