net/ipv4/inet_fragment.c (karo-tx-linux.git, as of "inet: frag: remove periodic secret rebuild timer")
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the IPFRAG_ECN_* values of all fragments, apply the
 * RFC 3168 5.3 requirements.
 * Value: 0xff if the frame should be dropped,
 *        0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

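/* Map a queue to its bucket. f->hashfn() mixes the per-table random
 * seed (f->rnd) into the hash, so the mapping changes on every secret
 * rebuild; INETFRAGS_HASHSZ must be a power of two for the mask below.
 */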
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

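/* Rate-limit secret rebuilds: at most one rehash of the whole table per
 * INETFRAGS_MIN_REBUILD_INTERVAL, no matter how often one is requested.
 */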
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

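/* Pick a fresh random seed and relink every queue into the bucket the
 * new hash assigns it. The write lock excludes all readers (lookup,
 * insert, unlink, eviction), which is why the per-bucket chain locks
 * can be skipped while relinking.
 */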
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock_bh(&f->lock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_unlock_bh(&f->lock);
}

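/* A queue is evictable while its netns is at or over the low watermark;
 * low_thresh == 0 (set on netns teardown) makes everything evictable.
 */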
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

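/* Evict every evictable queue in one bucket. Matching queues are moved
 * to a private "expired" list under the chain lock; their frag_expire
 * handlers run only after the lock has been dropped. If del_timer()
 * fails, the expire timer is running right now: take a reference so the
 * queue cannot go away, drop the chain lock, wait for the timer with
 * del_timer_sync(), then rescan the bucket from the start.
 */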
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

evict_again:
	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer)) {
			/* fq is expiring right now: take a reference so it
			 * cannot be freed under us, wait until the timer has
			 * finished executing, then drop our reference.
			 */
			atomic_inc(&fq->refcnt);
			spin_unlock(&hb->chain_lock);
			del_timer_sync(&fq->timer);
			WARN_ON(atomic_read(&fq->refcnt) != 1);
			inet_frag_put(fq, f);
			goto evict_again;
		}

		/* suppress xmit of (icmp) error packet */
		fq->last_in &= ~INET_FRAG_FIRST_IN;
		fq->last_in |= INET_FRAG_EVICTED;
		hlist_del(&fq->list);
		hlist_add_head(&fq->list, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list)
		f->frag_expire((unsigned long) fq);

	return evicted;
}

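/* Background eviction. Each run scans up to INETFRAGS_EVICT_BUCKETS
 * buckets, stops early once more than INETFRAGS_EVICT_MAX queues have
 * been evicted, and remembers where it stopped so the next run resumes
 * there. A pending secret rebuild is carried out afterwards, once the
 * read lock has been released.
 */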
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	read_lock_bh(&f->lock);

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	read_unlock_bh(&f->lock);
	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

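/* Kick the eviction worker. The work_pending() test is only an
 * optimization to avoid the schedule_work() call in the common case;
 * schedule_work() itself is a no-op for already-queued work.
 */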
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);
	f->last_rebuild_jiffies = 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
}
EXPORT_SYMBOL(inet_frags_fini);

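/* Tear down the per-netns state: setting low_thresh to 0 makes
 * inet_fragq_should_evict() treat every queue of this netns as
 * evictable, so one sweep over all buckets drops them all.
 */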
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	int i;

	nf->low_thresh = 0;

	read_lock_bh(&f->lock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	read_unlock_bh(&f->lock);

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

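/* Remove a queue from its hash chain. The read lock keeps a concurrent
 * secret rebuild from moving the queue to another bucket between the
 * hash computation and the hlist_del().
 */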
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
}

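/* Take a queue out of service: stop its timer and unlink it from the
 * hash, dropping the reference each of them held. The caller still owns
 * its own reference, so the queue is freed later, when the final
 * inet_frag_put() brings the refcount to zero.
 */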
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

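/* Final teardown, called once the last reference is gone: free every
 * queued fragment, return the accounted memory (sum of skb truesizes
 * plus the queue structure itself) to the netns limit, and let the
 * protocol-specific destructor release whatever the constructor set up.
 */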
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

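/* Publish a freshly allocated queue in the hash table and return the
 * queue the caller should use: either qp_in itself, or an identical
 * queue that another CPU managed to insert first, in which case qp_in
 * is discarded. The returned queue carries a reference for the caller;
 * the hash table and the pending timer hold one reference each.
 */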
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we were without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = inet_frag_hashfn(f, qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With an SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another cpu while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}

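/* Allocate and initialize a new queue. Above the high watermark no new
 * queue is created at all; the eviction worker is kicked instead, so
 * memory pressure is relieved asynchronously. GFP_ATOMIC because this
 * runs in packet-processing context.
 */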
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

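/* Look up a queue by key, creating one when none exists. The caller
 * must hold f->lock for reading before computing the hash; the lock is
 * released here on every return path (hence the __releases annotation).
 * Chain walks are bounded by INETFRAGS_MAXDEPTH: a longer chain
 * suggests a hash collision attack, so a secret rebuild is requested
 * and -ENOBUFS is returned instead of inserting.
 *
 * A caller, sketched loosely after the ipv4 code of this era (a
 * minimal sketch; the exact names live in net/ipv4/ip_fragment.c):
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 */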
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

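/* Companion to inet_frag_find(): a ratelimited warning for the
 * -ENOBUFS (chain too long) failure above, so dropped fragments leave
 * a trace in the logs.
 */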
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);