X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=net%2Fnetfilter%2Fnf_conntrack_core.c;h=9979f46c81dce32bc2288cfd4561c571f5bea4c5;hb=109a5db5042c035ded330b948a710b9a0c20934d;hp=e847dbaa0c6b3aefc3d417421a2b529a10735e38;hpb=1f8216e7f78c2e55b07117c77887a314bc36f690;p=karo-tx-linux.git

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e847dbaa0c6b..9979f46c81dc 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1586,13 +1586,12 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
 
 /* Bring out ya dead! */
 static struct nf_conn *
-get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
 		void *data, unsigned int *bucket)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
-	int cpu;
 	spinlock_t *lockp;
 
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
@@ -1604,8 +1603,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (net_eq(nf_ct_net(ct), net) &&
-			    iter(ct, data))
+			if (iter(ct, data))
 				goto found;
 		}
 	}
@@ -1614,51 +1612,150 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 		cond_resched();
 	}
 
+	return NULL;
+found:
+	atomic_inc(&ct->ct_general.use);
+	spin_unlock(lockp);
+	local_bh_enable();
+	return ct;
+}
+
+static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
+				  void *data, u32 portid, int report)
+{
+	unsigned int bucket = 0, sequence;
+	struct nf_conn *ct;
+
+	might_sleep();
+
+	for (;;) {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+
+		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+			/* Time to push up daises... */
+
+			nf_ct_delete(ct, portid, report);
+			nf_ct_put(ct);
+			cond_resched();
+		}
+
+		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
+			break;
+		bucket = 0;
+	}
+}
+
+struct iter_data {
+	int (*iter)(struct nf_conn *i, void *data);
+	void *data;
+	struct net *net;
+};
+
+static int iter_net_only(struct nf_conn *i, void *data)
+{
+	struct iter_data *d = data;
+
+	if (!net_eq(d->net, nf_ct_net(i)))
+		return 0;
+
+	return d->iter(i, d->data);
+}
+
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 
 		spin_lock_bh(&pcpu->lock);
 		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (iter(ct, data))
-				set_bit(IPS_DYING_BIT, &ct->status);
+
+			/* we cannot call iter() on unconfirmed list, the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
 		}
 		spin_unlock_bh(&pcpu->lock);
 		cond_resched();
 	}
-	return NULL;
-found:
-	atomic_inc(&ct->ct_general.use);
-	spin_unlock(lockp);
-	local_bh_enable();
-	return ct;
 }
 
-void nf_ct_iterate_cleanup(struct net *net,
-			   int (*iter)(struct nf_conn *i, void *data),
-			   void *data, u32 portid, int report)
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report)
 {
-	struct nf_conn *ct;
-	unsigned int bucket = 0;
+	struct iter_data d;
 
 	might_sleep();
 
 	if (atomic_read(&net->ct.count) == 0)
 		return;
 
-	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
-		/* Time to push up daises... */
+	__nf_ct_unconfirmed_destroy(net);
 
-		nf_ct_delete(ct, portid, report);
-		nf_ct_put(ct);
-		cond_resched();
+	d.iter = iter;
+	d.data = data;
+	d.net = net;
+
+	synchronize_net();
+
+	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
+
+/**
+ * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
+ * @iter: callback to invoke for each conntrack
+ * @data: data to pass to @iter
+ *
+ * Like nf_ct_iterate_cleanup, but first marks conntracks on the
+ * unconfirmed list as dying (so they will not be inserted into
+ * main table).
+ *
+ * Can only be called in module exit path.
+ */
+void
+nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+	struct net *net;
+
+	rtnl_lock();
+	for_each_net(net) {
+		if (atomic_read(&net->ct.count) == 0)
+			continue;
+		__nf_ct_unconfirmed_destroy(net);
 	}
+	rtnl_unlock();
+
+	/* Need to wait for netns cleanup worker to finish, if its
+	 * running -- it might have deleted a net namespace from
+	 * the global list, so our __nf_ct_unconfirmed_destroy() might
+	 * not have affected all namespaces.
+	 */
+	net_ns_barrier();
+
+	/* a conntrack could have been unlinked from unconfirmed list
+	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+	 * This makes sure its inserted into conntrack table.
+	 */
+	synchronize_net();
+
+	nf_ct_iterate_cleanup(iter, data, 0, 0);
 }
-EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
-	return 1;
+	return net_eq(nf_ct_net(i), data);
 }
 
 void nf_ct_free_hashtable(void *hash, unsigned int size)
@@ -1723,7 +1820,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 i_see_dead_people:
 	busy = 0;
 	list_for_each_entry(net, net_exit_list, exit_list) {
-		nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
+		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
 		if (atomic_read(&net->ct.count) != 0)
 			busy = 1;
 	}
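
Usage note (editorial addition, not part of the patch): with this change,
nf_ct_iterate_cleanup() loses its struct net argument and becomes a
file-local helper that restarts the table walk if a resize occurred (the
seqcount retry loop). Per-namespace callers switch to
nf_ct_iterate_cleanup_net(), which filters entries through iter_net_only();
module exit paths switch to the new nf_ct_iterate_destroy(), which first
marks unconfirmed entries in every namespace as dying. A minimal
caller-side sketch, assuming a hypothetical module whose callback
(unload_kill_cb) and exit function (demo_exit) are invented names:

	#include <linux/module.h>
	#include <net/netfilter/nf_conntrack.h>

	/* A non-zero return asks the iterator to nf_ct_delete() the entry. */
	static int unload_kill_cb(struct nf_conn *ct, void *data)
	{
		return 1;	/* flush every entry on module unload */
	}

	static void __exit demo_exit(void)
	{
		/* Before this patch the caller looped over namespaces:
		 *	nf_ct_iterate_cleanup(net, unload_kill_cb, NULL, 0, 0);
		 * The new call walks all namespaces, covers the per-cpu
		 * unconfirmed lists, and may sleep (module exit path only):
		 */
		nf_ct_iterate_destroy(unload_kill_cb, NULL);
	}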