/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
        union {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
        struct net                      *net;
        u16                             family;
        u8                              dir;
        u32                             genid;
        struct flowi                    key;
        struct flow_cache_object        *object;
};

struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

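/* Periodic timer: flag every possible CPU to pick a fresh jhash seed on
 * its next lookup, then re-arm for the next FLOW_HASH_RND_PERIOD.
 */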
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

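/* An entry is stale when the namespace-wide generation counter has moved
 * on since it was cached, or when its resolved object fails its own
 * ->check() hook.
 */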
static int flow_entry_valid(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

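/* Drop the entry's resolved object (if any) and free the entry itself. */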
static void flow_entry_kill(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

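/* Work item: splice the pending GC list off under the lock, then kill the
 * reaped entries outside of it.
 */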
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_gc_work);

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&xfrm->flow_cache_gc_lock);
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
                atomic_dec(&xfrm->flow_cache_gc_count);
        }
}

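/* Hand a batch of unlinked entries over to the GC work item; the caller
 * has already removed them from the hash table.
 */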
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
{
        if (deleted) {
                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
                spin_unlock_bh(&xfrm->flow_cache_gc_lock);
                schedule_work(&xfrm->flow_cache_gc_work);
        }
}

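/* Walk one CPU's hash table, keeping at most shrink_to valid entries per
 * bucket and queueing everything else for GC.  shrink_to == 0 empties the
 * table completely.
 */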
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

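/* Install a new per-cpu hash seed and empty the table, since every
 * existing entry now hashes to the wrong bucket.
 */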
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

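/* Hash the key as an array of u32 words; keysize is in units of
 * flow_compare_t, as returned by flow_key_size().
 */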
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          unsigned int keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            unsigned int keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

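/* Look up @key in the current CPU's flow cache.  On a hit with a current
 * genid, return the cached object via its ->get() hook.  On a miss or a
 * stale hit, call @resolver to build a fresh object, cache it, and return
 * it.  Runs with BHs disabled; may return an ERR_PTR() when the resolver
 * fails or when too much garbage is pending.
 */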
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        unsigned int keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                if (atomic_read(&net->xfrm.flow_cache_gc_count) >
                    2 * num_online_cpus() * fc->high_watermark) {
                        flo = ERR_PTR(-ENOBUFS);
                        goto ret_object;
                }

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

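/* Tasklet run on each CPU being flushed: queue every stale entry in the
 * local hash table for GC, then signal completion once the last CPU is
 * done.
 */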
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle, xfrm))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

/*
 * Return whether a cpu's flow cache is empty and therefore does not need
 * flushing.  Conservatively, we assume the presence of any entries means
 * the core may require flushing, since the flow_cache_ops.check()
 * function may assume it's running on the same core as the per-cpu cache
 * component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp;
        int i;

        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
                if (!hlist_empty(&fcp->hash_table[i]))
                        return 0;
        return 1;
}

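/* Cross-call callback: point the local flush tasklet at @info and
 * schedule it.
 */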
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

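/* Flush the flow cache on every online CPU that has entries and wait for
 * all of them to finish.  May sleep; serialized by flow_flush_sem.
 */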
void flow_cache_flush(struct net *net)
{
        struct flow_flush_info info;
        cpumask_var_t mask;
        int i, self;

        /* Track which cpus need flushing to avoid disturbing all cores. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;
        cpumask_clear(mask);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&net->xfrm.flow_flush_sem);
        info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
        atomic_set(&info.cpuleft, cpumask_weight(mask));
        if (atomic_read(&info.cpuleft) == 0)
                goto done;

        init_completion(&info.completion);

        local_bh_disable();
        self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
        on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
        if (self)
                flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

done:
        mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_flush_work);
        struct net *net = container_of(xfrm, struct net, xfrm);

        flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
        schedule_work(&net->xfrm.flow_cache_flush_work);
}

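/* Allocate and initialize one CPU's hash table on first use or CPU
 * hotplug; a no-op if the table already exists.
 */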
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

        return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        __flow_cache_shrink(fc, fcp, 0);
        return 0;
}

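/* Per-namespace setup: create the entry slab (once), initialize the GC
 * and flush machinery, size the watermarks, allocate the per-cpu state,
 * and start the periodic hash-seed timer.
 */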
int flow_cache_init(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        if (!flow_cachep)
                flow_cachep = kmem_cache_create("flow_cache",
                                                sizeof(struct flow_cache_entry),
                                                0, SLAB_PANIC, NULL);
        spin_lock_init(&net->xfrm.flow_cache_gc_lock);
        INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
        atomic_set(&net->xfrm.flow_cache_gc_count, 0);

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
                goto err;

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

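/* Per-namespace teardown: stop the seed timer, unhook from CPU hotplug,
 * and free every CPU's hash table along with the per-cpu state.
 */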
void flow_cache_fini(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        del_timer_sync(&fc->rnd_timer);

        cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

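/* Boot-time registration of the CPU hotplug callbacks shared by all flow
 * caches.
 */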
void __init flow_cache_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
                                      "net/flow:prepare",
                                      flow_cache_cpu_up_prep,
                                      flow_cache_cpu_dead);
        WARN_ON(ret < 0);
}