netfilter: trivial code cleanup and doc changes
author    Jesper Dangaard Brouer <brouer@redhat.com>
          Mon, 3 Mar 2014 13:44:54 +0000 (14:44 +0100)
committer Pablo Neira Ayuso <pablo@netfilter.org>
          Fri, 7 Mar 2014 10:40:04 +0000 (11:40 +0100)
Changes made while reading through the netfilter code.

Added a hint about how the conntrack nf_conn refcnt is accessed,
and renamed repl_hash to reply_hash for readability.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
include/net/netfilter/nf_conntrack.h
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c

index b2ac6246b7e0abe156b26a06bf9a4463331baa6e..e10d1faa6d096fc580cacf7902085b09beb18598 100644 (file)
@@ -73,7 +73,13 @@ struct nf_conn_help {
 
 struct nf_conn {
        /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
-           plus 1 for any connection(s) we are `master' for */
+        * plus 1 for any connection(s) we are `master' for
+        *
+        * Hint: SKBs reference this struct and its refcnt via skb->nfct,
+        * using the helpers nf_conntrack_get() and nf_conntrack_put().
+        * nf_ct_put() equals nf_conntrack_put() (both decrement the refcnt);
+        * beware that nf_ct_get() is different and does not take a reference.
+        */
        struct nf_conntrack ct_general;
 
        spinlock_t lock;
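
The following sketch (not part of the patch) illustrates how the helpers
named in the new comment are typically used from netfilter code. The
function example_hold_and_release() is a made-up name for illustration
only; nf_ct_get(), nf_conntrack_get(), nf_conntrack_put() and nf_ct_put()
are the real helpers the comment refers to.

    #include <net/netfilter/nf_conntrack.h>

    static void example_hold_and_release(struct sk_buff *skb)
    {
            enum ip_conntrack_info ctinfo;
            struct nf_conn *ct;

            /* Look up the conntrack attached to the skb; this only reads
             * skb->nfct and does NOT increment the refcnt.
             */
            ct = nf_ct_get(skb, &ctinfo);
            if (!ct)
                    return;

            /* Take an extra reference while we hold our own pointer ... */
            nf_conntrack_get(&ct->ct_general);

            /* ... and drop it again; nf_ct_put(ct) is equivalent to
             * nf_conntrack_put(&ct->ct_general).
             */
            nf_ct_put(ct);
    }
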
index 356bef519fe5b781f6225557f64186c0075852a1..965693eb1f0e665f97ce2dfd0af0777a5b3ab6eb 100644 (file)
@@ -408,21 +408,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
-                                      unsigned int repl_hash)
+                                      unsigned int reply_hash)
 {
        struct net *net = nf_ct_net(ct);
 
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-                          &net->ct.hash[repl_hash]);
+                          &net->ct.hash[reply_hash]);
 }
 
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
-       unsigned int hash, repl_hash;
+       unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        u16 zone;
@@ -430,7 +430,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        zone = nf_ct_zone(ct);
        hash = hash_conntrack(net, zone,
                              &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(net, zone,
+       reply_hash = hash_conntrack(net, zone,
                                   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        spin_lock_bh(&nf_conntrack_lock);
@@ -441,7 +441,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
@@ -451,7 +451,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
-       __nf_conntrack_hash_insert(ct, hash, repl_hash);
+       __nf_conntrack_hash_insert(ct, hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        spin_unlock_bh(&nf_conntrack_lock);
 
@@ -483,7 +483,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
-       unsigned int hash, repl_hash;
+       unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
@@ -507,7 +507,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        /* reuse the hash saved before */
        hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
        hash = hash_bucket(hash, net);
-       repl_hash = hash_conntrack(net, zone,
+       reply_hash = hash_conntrack(net, zone,
                                   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        /* We're not in hash table, and we refuse to set up related
@@ -540,7 +540,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
@@ -570,7 +570,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
-       __nf_conntrack_hash_insert(ct, hash, repl_hash);
+       __nf_conntrack_hash_insert(ct, hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        spin_unlock_bh(&nf_conntrack_lock);
 
index 4fd1ca94fd4a140b374385ded878af60b503b529..da2f84f41777bce7de8edbbbd9d7f9bd7ee75536 100644 (file)
@@ -417,7 +417,7 @@ out:
        return ret;
 }
 
-int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 
+int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
 {
        int ret;