netfilter: x_tables: pass xt_counters struct to counter allocator
author    Florian Westphal <fw@strlen.de>
Tue, 22 Nov 2016 13:44:18 +0000 (14:44 +0100)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Tue, 6 Dec 2016 20:42:18 +0000 (21:42 +0100)
Keeps some noise away from a follow-up patch.

Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
include/linux/netfilter/x_tables.h
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c
net/netfilter/x_tables.c
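
For orientation, a minimal before/after sketch of the calling convention this
patch changes, taken from the find_check_entry() hunks below (surrounding code
trimmed to the allocation step):

    /* Before: the allocator returns an unsigned long that each caller must
     * check with IS_ERR_VALUE() and store into e->counters.pcnt by hand.
     */
    unsigned long pcnt = xt_percpu_counter_alloc();
    if (IS_ERR_VALUE(pcnt))
            return -ENOMEM;
    e->counters.pcnt = pcnt;

    /* After: the xt_counters struct is passed in and the allocator returns
     * bool, so every caller collapses to a single check.
     */
    if (!xt_percpu_counter_alloc(&e->counters))
            return -ENOMEM;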

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6e61edeb68e3b28c42ebd137234e2bfdf92d51fd..05a94bd32c55859af17b8ad1578a39219815f9aa 100644
@@ -404,32 +404,7 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 }
 
 
-/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
- * real (percpu) counter.  On !SMP, its just the packet count,
- * so nothing needs to be done there.
- *
- * xt_percpu_counter_alloc returns the address of the percpu
- * counter, or 0 on !SMP. We force an alignment of 16 bytes
- * so that bytes/packets share a common cache line.
- *
- * Hence caller must use IS_ERR_VALUE to check for error, this
- * allows us to return 0 for single core systems without forcing
- * callers to deal with SMP vs. NONSMP issues.
- */
-static inline unsigned long xt_percpu_counter_alloc(void)
-{
-       if (nr_cpu_ids > 1) {
-               void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
-                                                   sizeof(struct xt_counters));
-
-               if (res == NULL)
-                       return -ENOMEM;
-
-               return (__force unsigned long) res;
-       }
-
-       return 0;
-}
+bool xt_percpu_counter_alloc(struct xt_counters *counters);
 void xt_percpu_counter_free(struct xt_counters *cnt);
 
 static inline struct xt_counters *
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 019f8e8dda6d98eead1a74b1e957e43ac460c68a..808deb275ceba7385655fe12d28d4111e4668a66 100644
@@ -415,13 +415,10 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 {
        struct xt_entry_target *t;
        struct xt_target *target;
-       unsigned long pcnt;
        int ret;
 
-       pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(pcnt))
+       if (!xt_percpu_counter_alloc(&e->counters))
                return -ENOMEM;
-       e->counters.pcnt = pcnt;
 
        t = arpt_get_target(e);
        target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index acc9a0c45bdf406d532218ba0a35996fc13a4ae5..a48430d3420f24f1235ce8710b436382f273c033 100644
@@ -539,12 +539,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        unsigned int j;
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
-       unsigned long pcnt;
 
-       pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(pcnt))
+       if (!xt_percpu_counter_alloc(&e->counters))
                return -ENOMEM;
-       e->counters.pcnt = pcnt;
 
        j = 0;
        mtpar.net       = net;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 88b56a98905b2771119f45254aedd076e8ec6dfb..a5a92083fd626f821fdd6d9315d47e739eebc168 100644
@@ -570,12 +570,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
        unsigned int j;
        struct xt_mtchk_param mtpar;
        struct xt_entry_match *ematch;
-       unsigned long pcnt;
 
-       pcnt = xt_percpu_counter_alloc();
-       if (IS_ERR_VALUE(pcnt))
+       if (!xt_percpu_counter_alloc(&e->counters))
                return -ENOMEM;
-       e->counters.pcnt = pcnt;
 
        j = 0;
        mtpar.net       = net;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 0580029eb0ee5b52d592e742e248c88d75f6e552..be5e830475942d45ee6076a0ca01a18009ce2c71 100644
@@ -1615,6 +1615,36 @@ void xt_proto_fini(struct net *net, u_int8_t af)
 }
 EXPORT_SYMBOL_GPL(xt_proto_fini);
 
+/**
+ * xt_percpu_counter_alloc - allocate x_tables rule counter
+ *
+ * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
+ *
+ * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
+ * contain the address of the real (percpu) counter.
+ *
+ * Rule evaluation needs to use xt_get_this_cpu_counter() helper
+ * to fetch the real percpu counter.
+ *
+ * returns false on error.
+ */
+bool xt_percpu_counter_alloc(struct xt_counters *counter)
+{
+       void __percpu *res;
+
+       if (nr_cpu_ids <= 1)
+               return true;
+
+       res = __alloc_percpu(sizeof(struct xt_counters),
+                            sizeof(struct xt_counters));
+       if (!res)
+               return false;
+
+       counter->pcnt = (__force unsigned long)res;
+       return true;
+}
+EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
+
 void xt_percpu_counter_free(struct xt_counters *counters)
 {
        unsigned long pcnt = counters->pcnt;
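
As the kernel-doc above notes, rule evaluation must go through
xt_get_this_cpu_counter() to reach the real counter. A hedged sketch of the
counter lifecycle under the new interface, with 'e' and 'skb' standing in for
the entry and packet of an ip_tables-style caller (xt_percpu_counter_alloc(),
xt_get_this_cpu_counter(), xt_percpu_counter_free() and ADD_COUNTER() are the
existing x_tables helpers; the surrounding control flow is illustrative, not
the literal ipt_do_table()/cleanup_entry() code):

    /* 1) Translation time (find_check_entry): allocate the percpu counter.
     *    On !SMP nothing is allocated and e->counters stays the real counter.
     */
    if (!xt_percpu_counter_alloc(&e->counters))
            return -ENOMEM;

    /* 2) Evaluation time: resolve to this CPU's counter and bump it.
     *    On SMP, e->counters.pcnt holds the percpu address; on !SMP the
     *    helper simply returns &e->counters.
     */
    struct xt_counters *counter = xt_get_this_cpu_counter(&e->counters);
    ADD_COUNTER(*counter, skb->len, 1);

    /* 3) Teardown (cleanup_entry): release the percpu allocation, if any. */
    xt_percpu_counter_free(&e->counters);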