/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>
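
/* A cache entry sits on exactly one list at a time: its per-CPU hash
 * chain (u.hlist) while live, or a garbage-collection list (u.gc_list)
 * once it has been evicted and is waiting to be freed.
 */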
struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)	(1U << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
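
/* Periodic timer: mark every CPU's hash seed as stale.  The new seed is
 * generated lazily by flow_new_hash_rnd() the next time that CPU does a
 * lookup, which also empties that CPU's hash table.
 */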
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle,
			    struct netns_xfrm *xfrm)
{
	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle,
			    struct netns_xfrm *xfrm)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_gc_work);

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&xfrm->flow_cache_gc_lock);
	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&xfrm->flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
		flow_entry_kill(fce, xfrm);
		atomic_dec(&xfrm->flow_cache_gc_count);
	}
}
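
/* Hand a batch of evicted entries to the GC list.  The actual freeing,
 * including the object ->delete() callback, runs later in process
 * context from flow_cache_gc_task().
 */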
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list,
				     struct netns_xfrm *xfrm)
{
	if (deleted) {
		atomic_add(deleted, &xfrm->flow_cache_gc_count);
		fcp->hash_count -= deleted;
		spin_lock_bh(&xfrm->flow_cache_gc_lock);
		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
		schedule_work(&xfrm->flow_cache_gc_work);
	}
}
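
/* Walk every bucket of this CPU's table, keep at most shrink_to
 * still-valid entries per bucket and queue everything else for
 * garbage collection; shrink_to == 0 therefore empties the table.
 */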
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle, xfrm)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  unsigned int keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    unsigned int keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
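
/* Look up the cached object for @key on the local CPU.  On a miss, or
 * when the cached entry's genid is stale, @resolver is called and its
 * result (if not an error) is stored in the entry.  On a fresh hit the
 * object's ->get() reference is returned.  Returns ERR_PTR(-ENOBUFS)
 * when too many dead entries are still waiting for garbage collection.
 */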
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &net->xfrm.flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	unsigned int keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		if (atomic_read(&net->xfrm.flow_cache_gc_count) >
		    2 * num_online_cpus() * fc->high_watermark) {
			flo = ERR_PTR(-ENOBUFS);
			goto ret_object;
		}

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
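
/* Flush the whole cache for @net: schedule the flush tasklet on every
 * online CPU that still holds entries, run it directly if the current
 * CPU is one of them, then wait for all of them to complete.
 */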
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}
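
/* CPU hotplug "prepare" step: allocate this CPU's hash table on its own
 * NUMA node and initialise its flush tasklet.  Re-invocation is
 * harmless; an already-allocated table is left untouched.
 */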
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %u\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

	return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	__flow_cache_shrink(fc, fcp, 0);
	return 0;
}
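
/* Per-netns setup: initialise the GC and flush machinery, size the
 * watermarks from the hash table size, allocate the per-CPU state,
 * register with CPU hotplug and arm the periodic hash-reseed timer.
 */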
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	if (!flow_cachep)
		flow_cachep = kmem_cache_create("flow_cache",
						sizeof(struct flow_cache_entry),
						0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);
	atomic_set(&net->xfrm.flow_cache_gc_count, 0);

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
		goto err;

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

void flow_cache_fini(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	del_timer_sync(&fc->rnd_timer);

	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

void __init flow_cache_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
				      "net/flow:prepare",
				      flow_cache_cpu_up_prep,
				      flow_cache_cpu_dead);
	WARN_ON(ret < 0);
}