/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list where long lived entries are stored,
 *    handled by the garbage collect task fired by a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
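
/* A minimal sketch of the handoff described above (illustrative only;
 * dst_garbage, dst_busy_list and dst_gc_mutex are all defined below).
 * Producers push entries under the spinlock:
 *
 *	spin_lock_bh(&dst_garbage.lock);
 *	dst->next = dst_garbage.list;
 *	dst_garbage.list = dst;
 *	spin_unlock_bh(&dst_garbage.lock);
 *
 * while the gc task, holding dst_gc_mutex, periodically drains
 * dst_garbage.list into dst_busy_list and walks the latter.
 */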
/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry		*dst_busy_list;
static void dst_gc_task(struct work_struct *work)
{
	int    delayed = 0;
	int    work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;
loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;
		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;
			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
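
/* Worked example of the backoff above, assuming the historical values
 * DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2 and DST_GC_MAX = 120*HZ from
 * <net/dst.h> (quoted from memory; the header is authoritative).
 * Starting from a busy round, successive quiet rounds sleep for
 *
 *	HZ/10,  HZ/10 + HZ/2,  HZ/10 + 3*HZ/2,  HZ/10 + 3*HZ, ...
 *
 * i.e. timer_expires grows by a timer_inc that itself grows by DST_GC_INC
 * each round, until it saturates at DST_GC_MAX. One round that frees more
 * than 1/10 of the delayed entries resets the delay to DST_GC_MIN.
 */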
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);
const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end up in the bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	.refcnt = ATOMIC_INIT(1),
};
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
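
/* Example call site (a hedged sketch, not part of this file): an IPv4-style
 * protocol whose struct rtable embeds a struct dst_entry as its first
 * member would allocate along these lines; ipv4_dst_ops is the real IPv4
 * dst_ops instance, the error handling shown is hypothetical:
 *
 *	struct rtable *rt;
 *
 *	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
 *	if (!rt)
 *		return ERR_PTR(-ENOBUFS);
 */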
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev==NULL) is required, when
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
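
/* For context: the dst_free() helper in <net/dst.h> (reproduced here from
 * memory; the header is authoritative) combines immediate destruction with
 * the deferred path above, so callers need not check the refcount by hand:
 *
 *	static inline void dst_free(struct dst_entry *dst)
 *	{
 *		if (dst->obsolete > 0)
 *			return;
 *		if (!atomic_read(&dst->__refcnt)) {
 *			dst = dst_destroy(dst);
 *			if (!dst)
 *				return;
 *		}
 *		__dst_free(dst);
 *	}
 */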
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child;

	smp_rmb();
again:
	child = dst->child;
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);
	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	lwtstate_put(dst->lwtstate);
	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);
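
/* Usage sketch (illustrative; dst_hold() and skb_dst() are real helpers
 * from <net/dst.h>): every reference taken must be paired with a release,
 * e.g. when a dst must outlive the skb it was read from:
 *
 *	struct dst_entry *dst = skb_dst(skb);
 *
 *	dst_hold(dst);		// our own reference
 *	...
 *	dst_release(dst);	// for DST_NOCACHE, may free via call_rcu()
 */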
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		atomic_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (atomic_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
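
/* The cmpxchg() above is what resolves the copy-on-write race: if another
 * CPU installed its writable copy first, cmpxchg() fails (prev != old) and
 * we discard ours in favor of the winner's, unless the winner installed a
 * read-only block. From the caller's side (hedged sketch; the helper
 * dst_metrics_write_ptr() in <net/dst.h> is what triggers this COW):
 *
 *	u32 *p = dst_metrics_write_ptr(dst);
 *
 *	if (p)
 *		p[RTAX_MTU - 1] = new_mtu;	// private, writable copy
 */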
/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
static struct dst_ops md_dst_ops = {
	.family =		AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;
	__metadata_dst_init(md_dst, optslen);
	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
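
/* Illustrative sketch of a tunnel receive path carrying metadata with an
 * skb (tun_id is a real field of struct ip_tunnel_key; vni and the
 * surrounding logic are hypothetical):
 *
 *	struct metadata_dst *tun_dst;
 *
 *	tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
 *	if (!tun_dst)
 *		return -ENOMEM;
 *	tun_dst->u.tun_info.key.tun_id = cpu_to_be64(vni);
 *	skb_dst_set(skb, &tun_dst->dst);
 */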
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
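
/* Hedged usage note: the per-cpu variant serves fast paths that need one
 * scratch metadata entry per CPU without allocating per packet (BPF's
 * tunnel-key helpers use such a pool, if memory serves). A sketch, where
 * md_dst_pcpu is a hypothetical pool allocated by the function above:
 *
 *	struct metadata_dst *md = this_cpu_ptr(md_dst_pcpu);
 *
 *	dst_hold(&md->dst);		// skb_dst_set() consumes a reference
 *	skb_dst_set(skb, &md->dst);
 */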
/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		/* The code in dst_ifdown places a hold on the loopback device.
		 * If the gc entry processing is set to expire after a lengthy
		 * interval, this hold can cause netdev_wait_allrefs() to hang
		 * out and wait for a long time -- until the loopback
		 * interface is released. If we're really unlucky, it'll emit
		 * pr_emerg messages to console too. Reset the interval here,
		 * so dst cleanups occur in a more timely fashion.
		 */
		if (dst_garbage.timer_inc > DST_GC_INC) {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
			mod_delayed_work(system_wq, &dst_gc_work,
					 dst_garbage.timer_expires);
		}
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};
void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}