Merge branch 'master' into tk71

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index df3eedb142ff809b9ce8f9dd49c41782f91aba42..84f4fcc5884be4fd6a4a08774eb91ba1facd2f7b 100644
@@ -65,32 +65,42 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-static int nf_conntrack_hash_rnd_initted;
-static unsigned int nf_conntrack_hash_rnd;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
 
-static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 u16 zone, unsigned int size, unsigned int rnd)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
        unsigned int n;
-       u_int32_t h;
 
        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-       h = jhash2((u32 *)tuple, n,
-                  zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
-                                tuple->dst.protonum));
+       return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+                     (((__force __u16)tuple->dst.u.all << 16) |
+                     tuple->dst.protonum));
+}
+
+static u32 __hash_bucket(u32 hash, unsigned int size)
+{
+       return ((u64)hash * size) >> 32;
+}
+
+static u32 hash_bucket(u32 hash, const struct net *net)
+{
+       return __hash_bucket(hash, net->ct.htable_size);
+}
 
-       return ((u64)h * size) >> 32;
+static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
+                                 u16 zone, unsigned int size)
+{
+       return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
 }
 
 static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
                                       const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, zone, net->ct.htable_size,
-                               nf_conntrack_hash_rnd);
+       return __hash_conntrack(tuple, zone, net->ct.htable_size);
 }
 
 bool
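
Note how __hash_bucket() above folds a 32-bit hash into [0, size) with a multiply-and-shift rather than a modulo: (hash * size) / 2^32 stays uniform for any table size, power of two or not, and avoids a division on the fast path. A minimal userspace sketch of the same reduction (names are illustrative, not from this file):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [0, size) without a division: (hash * size)
 * / 2^32 distributes entries evenly as long as the input hash is
 * uniform over the full 32-bit range. */
static uint32_t bucket_of(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t size = 16384;	/* need not be a power of two */
	uint32_t h = 0xdeadbeef;

	printf("bucket=%u (of %u)\n", bucket_of(h, size), size);
	return 0;
}
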
@@ -292,20 +302,20 @@ static void death_by_timeout(unsigned long ul_conntrack)
  * OR
  * - Caller must lock nf_conntrack_lock before calling this function
  */
-struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, u16 zone,
-                   const struct nf_conntrack_tuple *tuple)
+static struct nf_conntrack_tuple_hash *
+____nf_conntrack_find(struct net *net, u16 zone,
+                     const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(net, zone, tuple);
+       unsigned int bucket = hash_bucket(hash, net);
 
        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
 begin:
-       hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
+       hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple) &&
                    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
                        NF_CT_STAT_INC(net, found);
@@ -319,7 +329,7 @@ begin:
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
-       if (get_nulls_value(n) != hash) {
+       if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC(net, search_restart);
                goto begin;
        }
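
____nf_conntrack_find() now takes a precomputed hash and walks the RCU chain locklessly. Because conntracks are allocated SLAB_DESTROY_BY_RCU, an object can be freed and recycled onto another chain mid-walk; the nulls terminator encodes the bucket it belongs to, so a reader that ends up on a foreign chain sees the mismatch and restarts. A rough single-threaded sketch of that restart discipline, using a simplified stand-in for hlist_nulls (illustrative only, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Simplified take on the hlist_nulls idea: the chain terminator is not
 * NULL but an odd value encoding the bucket number, so a lockless
 * reader that got moved onto another chain (object freed and reused,
 * as SLAB_DESTROY_BY_RCU permits) can detect it and restart. */

struct node {
	uintptr_t next;		/* node pointer, or (bucket << 1) | 1 */
	int key;
};

static int is_nulls(uintptr_t p)          { return p & 1; }
static uintptr_t nulls_value(uintptr_t p) { return p >> 1; }

static struct node *find(uintptr_t *table, unsigned int bucket, int key)
{
	uintptr_t p;
begin:
	for (p = table[bucket]; !is_nulls(p); p = ((struct node *)p)->next)
		if (((struct node *)p)->key == key)
			return (struct node *)p;
	/* Walk ended on another chain's terminator: a node was recycled
	 * under us, so this miss is unreliable; start over. */
	if (nulls_value(p) != bucket)
		goto begin;
	return NULL;
}

int main(void)
{
	struct node n = { .next = (2 << 1) | 1, .key = 42 };
	uintptr_t table[4] = {
		(0 << 1) | 1, (1 << 1) | 1, (uintptr_t)&n, (3 << 1) | 1,
	};

	printf("found=%d\n", find(table, 2, 42) != NULL);
	return 0;
}
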
@@ -327,19 +337,27 @@ begin:
 
        return NULL;
 }
+
+struct nf_conntrack_tuple_hash *
+__nf_conntrack_find(struct net *net, u16 zone,
+                   const struct nf_conntrack_tuple *tuple)
+{
+       return ____nf_conntrack_find(net, zone, tuple,
+                                    hash_conntrack_raw(tuple, zone));
+}
 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 
 /* Find a connection corresponding to a tuple. */
-struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
-                     const struct nf_conntrack_tuple *tuple)
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find_get(struct net *net, u16 zone,
+                       const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
 
        rcu_read_lock();
 begin:
-       h = __nf_conntrack_find(net, zone, tuple);
+       h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
@@ -357,6 +375,14 @@ begin:
 
        return h;
 }
+
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, u16 zone,
+                     const struct nf_conntrack_tuple *tuple)
+{
+       return __nf_conntrack_find_get(net, zone, tuple,
+                                      hash_conntrack_raw(tuple, zone));
+}
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
@@ -409,8 +435,11 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                return NF_ACCEPT;
 
        zone = nf_ct_zone(ct);
-       hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       /* reuse the hash saved by __nf_conntrack_alloc() */
+       hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
+       hash = hash_bucket(hash, net);
+       repl_hash = hash_conntrack(net, zone,
+                                  &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
@@ -567,17 +596,33 @@ static noinline int early_drop(struct net *net, unsigned int hash)
        return dropped;
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
-                                  const struct nf_conntrack_tuple *orig,
-                                  const struct nf_conntrack_tuple *repl,
-                                  gfp_t gfp)
+void init_nf_conntrack_hash_rnd(void)
+{
+       unsigned int rand;
+
+       /*
+        * Why isn't nf_conntrack_hash_rnd seeded in an init() function?
+        * Because there isn't enough entropy while the system is booting,
+        * so we initialize it as late as possible.
+        */
+       do {
+               get_random_bytes(&rand, sizeof(rand));
+       } while (!rand);
+       cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+}
+
+static struct nf_conn *
+__nf_conntrack_alloc(struct net *net, u16 zone,
+                    const struct nf_conntrack_tuple *orig,
+                    const struct nf_conntrack_tuple *repl,
+                    gfp_t gfp, u32 hash)
 {
        struct nf_conn *ct;
 
-       if (unlikely(!nf_conntrack_hash_rnd_initted)) {
-               get_random_bytes(&nf_conntrack_hash_rnd,
-                               sizeof(nf_conntrack_hash_rnd));
-               nf_conntrack_hash_rnd_initted = 1;
+       if (unlikely(!nf_conntrack_hash_rnd)) {
+               init_nf_conntrack_hash_rnd();
+               /* nf_conntrack_hash_rnd was just seeded: the hash passed
+                * in was computed without it, so recompute it now. */
+               hash = hash_conntrack_raw(orig, zone);
        }
 
        /* We don't want any race condition at early drop stage */
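
init_nf_conntrack_hash_rnd() draws the seed lazily, on the first allocation, when the entropy pool is in better shape than at boot, and publishes it with cmpxchg() so that racing CPUs agree on a single nonzero value. Roughly the same pattern can be sketched with C11 atomics; random() and atomic_compare_exchange_strong() are userspace stand-ins for get_random_bytes() and cmpxchg(), not what the kernel uses:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint32_t hash_rnd;	/* 0 means "not seeded yet" */

static void init_hash_rnd(void)
{
	uint32_t rnd, expected = 0;

	/* Zero is reserved as the "uninitialized" sentinel, so draw
	 * again in the unlikely case the generator hands one back. */
	do {
		rnd = (uint32_t)random();
	} while (!rnd);

	/* If several threads race here, only the first store wins and
	 * all of them continue with the same seed. */
	atomic_compare_exchange_strong(&hash_rnd, &expected, rnd);
}

int main(void)
{
	if (!atomic_load(&hash_rnd))
		init_hash_rnd();
	printf("seed=%#x\n", atomic_load(&hash_rnd));
	return 0;
}
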
@@ -585,8 +630,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 
        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-               unsigned int hash = hash_conntrack(net, zone, orig);
-               if (!early_drop(net, hash)) {
+               if (!early_drop(net, hash_bucket(hash, net))) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
@@ -616,7 +660,8 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
-       ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
+       /* save the hash so __nf_conntrack_confirm() can reuse it */
+       *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
        write_pnet(&ct->ct_net, net);
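
Here the raw hash computed in resolve_normal_ct() is stashed in the reply tuplehash's hnnode.pprev, which carries no meaning until the conntrack is confirmed and actually linked into the table; __nf_conntrack_confirm() reads it back instead of rehashing the tuple. A tiny illustration of parking a value in a not-yet-linked node's pointer field (illustrative types, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct list_node {
	struct list_node *next;
	struct list_node **pprev;	/* meaningless until linked in */
};

struct entry {
	struct list_node node;
	int payload;
};

int main(void)
{
	struct entry e = { { NULL, NULL }, 0 };
	uint32_t hash = 0xcafef00du;

	/* While the entry is not yet hashed in, pprev carries no state,
	 * so the precomputed hash can ride in it; insertion overwrites
	 * it with a real back-pointer later. */
	*(unsigned long *)&e.node.pprev = hash;

	printf("stashed hash = %#x\n",
	       (unsigned int)*(unsigned long *)&e.node.pprev);
	return 0;
}
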
@@ -643,6 +688,14 @@ out_free:
        return ERR_PTR(-ENOMEM);
 #endif
 }
+
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+                                  const struct nf_conntrack_tuple *orig,
+                                  const struct nf_conntrack_tuple *repl,
+                                  gfp_t gfp)
+{
+       return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
+}
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *ct)
@@ -664,7 +717,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
-              unsigned int dataoff)
+              unsigned int dataoff, u32 hash)
 {
        struct nf_conn *ct;
        struct nf_conn_help *help;
@@ -678,7 +731,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                return NULL;
        }
 
-       ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
+       ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
+                                 hash);
        if (IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
@@ -755,6 +809,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
@@ -764,10 +819,11 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        }
 
        /* look for tuple match */
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       hash = hash_conntrack_raw(&tuple, zone);
+       h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
-                                  skb, dataoff);
+                                  skb, dataoff, hash);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
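
resolve_normal_ct() now hashes the tuple exactly once and threads the value through both the lookup and, on a miss, the allocation path, rather than letting each helper rehash it. The shape of that compute-once pattern as a self-contained toy (hypothetical names, demo hash function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

/* Toy table, one slot per bucket: just enough to show the key being
 * hashed once, with the value reused by both lookup and insert. */
struct slot { int used; char key[24]; };
static struct slot table[NBUCKETS];

static uint32_t hash_key(const char *key)
{
	uint32_t h = 2166136261u;		/* FNV-1a, demo only */
	while (*key)
		h = (h ^ (uint8_t)*key++) * 16777619u;
	return h;
}

static struct slot *lookup_or_create(const char *key)
{
	uint32_t hash = hash_key(key);		/* hashed exactly once */
	struct slot *s = &table[((uint64_t)hash * NBUCKETS) >> 32];

	if (s->used && strcmp(s->key, key) == 0)
		return s;			/* lookup hit */
	s->used = 1;				/* miss: create in place, */
	snprintf(s->key, sizeof(s->key), "%s", key); /* no second hash */
	return s;
}

int main(void)
{
	struct slot *a = lookup_or_create("10.0.0.1:80");
	struct slot *b = lookup_or_create("10.0.0.1:80");

	printf("same slot: %d\n", a == b);
	return 0;
}
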
@@ -886,8 +942,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
-       if (tmpl)
-               nf_ct_put(tmpl);
+       if (tmpl) {
+               /* Special case: we have to repeat this hook, so reassign
+                * the template to this packet. We assume that the packet
+                * has no conntrack attached. This is used by nf_ct_tcp. */
+               if (ret == NF_REPEAT)
+                       skb->nfct = (struct nf_conntrack *)tmpl;
+               else
+                       nf_ct_put(tmpl);
+       }
 
        return ret;
 }
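
On NF_REPEAT the hook is about to run again on the same skb, so the template's reference is handed back to the packet rather than dropped; any other verdict releases it with nf_ct_put(). The general move, conditionally transferring ownership of a refcounted object instead of releasing it, sketched with a toy refcount (illustrative, not the kernel's API):

#include <stdio.h>

struct tmpl { int refcnt; };

static void tmpl_put(struct tmpl *t)
{
	if (--t->refcnt == 0)
		printf("template freed\n");
}

enum verdict { VERDICT_ACCEPT, VERDICT_REPEAT };

/* If the hook must run again, the packet keeps the template reference;
 * on any other verdict this path is the last user and must drop it. */
static void finish_hook(enum verdict v, struct tmpl *t, struct tmpl **pkt_ref)
{
	if (v == VERDICT_REPEAT)
		*pkt_ref = t;		/* ownership moves to the packet */
	else
		tmpl_put(t);
}

int main(void)
{
	struct tmpl t = { .refcnt = 1 };
	struct tmpl *pkt_ref = NULL;

	finish_hook(VERDICT_REPEAT, &t, &pkt_ref);
	printf("packet ref %s\n", pkt_ref ? "reassigned" : "dropped");
	return 0;
}
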
@@ -1260,7 +1323,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-               hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+               hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                                PAGE_KERNEL);
        }
 
        if (hash && nulls)
@@ -1307,8 +1371,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
                        bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-                                                 hashsize,
-                                                 nf_conntrack_hash_rnd);
+                                                 hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
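
The resize path above unlinks every entry and recomputes only its bucket in the new table; with the seed now a global, __hash_conntrack() no longer needs the rnd argument. A compact userspace sketch of rehashing chained buckets on resize, reusing a stored 32-bit hash (illustrative names; error handling omitted):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; uint32_t hash; };

static unsigned int bucket_of(uint32_t hash, unsigned int size)
{
	return (unsigned int)(((uint64_t)hash * size) >> 32);
}

/* Move every entry onto the new chains; only the bucket is recomputed,
 * the stored 32-bit hash is reused as-is. */
static struct node **rehash(struct node **old, unsigned int old_sz,
			    unsigned int new_sz)
{
	struct node **nt = calloc(new_sz, sizeof(*nt));
	unsigned int i;

	for (i = 0; i < old_sz; i++) {
		while (old[i]) {
			struct node *n = old[i];
			unsigned int b = bucket_of(n->hash, new_sz);

			old[i] = n->next;	/* unlink from old chain */
			n->next = nt[b];	/* push onto new chain */
			nt[b] = n;
		}
	}
	free(old);
	return nt;
}

int main(void)
{
	struct node **t = calloc(4, sizeof(*t));
	struct node n = { .next = NULL, .hash = 0xdeadbeefu };

	t[bucket_of(n.hash, 4)] = &n;
	t = rehash(t, 4, 16);
	printf("rehashed into 16 buckets\n");
	free(t);
	return 0;
}
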
        }