git.karo-electronics.de Git - linux-beck.git/commitdiff
[NETFILTER]: non-power-of-two jhash optimizations
author: Patrick McHardy <kaber@trash.net>
Tue, 18 Dec 2007 06:45:52 +0000 (22:45 -0800)
committer: David S. Miller <davem@davemloft.net>
Mon, 28 Jan 2008 22:59:11 +0000 (14:59 -0800)
Apply Eric Dumazet's jhash optimizations where applicable. Quoting Eric:

Thanks to jhash, the hash value uses the full 32 bits. Instead of returning
hash % size (which implies a divide), we return the high 32 bits of the
64-bit product (hash * size), which yields results in the range
[0, size-1] with the same hash distribution.

On most cpus, a multiply is less expensive than a divide, by an order
of magnitude.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/nf_nat_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c

index a48e26449fd57e5b2b6453e32b8518b183cf3f57..df39ca07fb120706618908e0c77f089685607235 100644 (file)
@@ -273,7 +273,7 @@ clusterip_hashfn(const struct sk_buff *skb,
        }
 
        /* node numbers are 1..n, not 0..n */
-       return (hashval % config->num_total_nodes) + 1;
+       return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
 }
 
 static inline int
index aec157d0ad9334785e89177bb55d0e8fd18f17c7..e53ae1ef8f5e1f0dae45e5d191b079c681a0dcb3 100644 (file)
@@ -77,10 +77,13 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 static inline unsigned int
 hash_by_src(const struct nf_conntrack_tuple *tuple)
 {
+       unsigned int hash;
+
        /* Original src, to ensure we map it consistently if poss. */
-       return jhash_3words((__force u32)tuple->src.u3.ip,
+       hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all,
-                           tuple->dst.protonum, 0) % nf_nat_htable_size;
+                           tuple->dst.protonum, 0);
+       return ((u64)hash * nf_nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -211,7 +214,8 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
        maxip = ntohl(range->max_ip);
        j = jhash_2words((__force u32)tuple->src.u3.ip,
                         (__force u32)tuple->dst.u3.ip, 0);
-       *var_ipp = htonl(minip + j % (maxip - minip + 1));
+       j = ((u64)j * (maxip - minip + 1)) >> 32;
+       *var_ipp = htonl(minip + j);
 }
 
 /* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
index 5920f953f7828604716a7613b61055aca4bbee12..25564073ee231a4bcbe0f09b35a452bd1247932a 100644 (file)
@@ -81,7 +81,7 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                   ((__force __u16)tuple->src.u.all << 16) |
                    (__force __u16)tuple->dst.u.all);
 
-       return jhash_2words(a, b, rnd) % size;
+       return ((u64)jhash_2words(a, b, rnd) * size) >> 32;
 }
 
 static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
index 0efbf343eac8c0fb2efe107fda9cddca8309bbd9..e0cd9d00aa61e872f2f769c142e065a529fcdf23 100644 (file)
@@ -73,15 +73,17 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
 
 static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
 {
+       unsigned int hash;
+
        if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
                get_random_bytes(&nf_ct_expect_hash_rnd, 4);
                nf_ct_expect_hash_rnd_initted = 1;
        }
 
-       return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
+       hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-                      (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
-              nf_ct_expect_hsize;
+                      (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
+       return ((u64)hash * nf_ct_expect_hsize) >> 32;
 }
 
 struct nf_conntrack_expect *