/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly. and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>
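
/*
 * Timer callback: pick a fresh hash secret and relink every queue onto
 * the chain it now hashes to, so chain placement does not stay
 * predictable over time.
 */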
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->ctl->secret_interval);
}
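
/*
 * Set up the per-protocol fragment state: empty hash chains and LRU
 * list, a randomised hash secret, zeroed accounting and the periodic
 * secret-rebuild timer.
 */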
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	INIT_LIST_HEAD(&f->lru_list);
	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	f->nqueues = 0;
	atomic_set(&f->mem, 0);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->ctl->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
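
/* Undo inet_frags_init(); only the secret timer needs stopping. */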
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
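
/*
 * fq_unlink() removes a queue from the hash and LRU under f->lock;
 * inet_frag_kill() additionally stops its timer, marks it COMPLETE and
 * drops the references the table and the timer held on it.
 */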
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	f->nqueues--;
	write_unlock(&f->lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
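
/*
 * Free one fragment skb, giving its truesize back to the memory
 * accounting (and to *work when called from the evictor).
 */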
static inline void frag_kfree_skb(struct inet_frags *f, struct sk_buff *skb,
				int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &f->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
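
/*
 * Final teardown of a dead queue: free all queued fragments, return
 * the queue's own size to the accounting, then run the protocol
 * destructor and free the descriptor.
 */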
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(q->last_in & COMPLETE);
	BUG_TRAP(del_timer(&q->timer) == 0);

	/* Release all fragment data. */
	fp = q->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &f->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
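
/*
 * Kill queues from the head of the LRU list until memory consumption
 * falls back under low_thresh; returns the number of queues evicted.
 */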
int inet_frag_evictor(struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&f->mem) - f->ctl->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&f->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&f->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
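
/*
 * Publish a freshly allocated queue in the hash table, rechecking for
 * a queue created concurrently on another CPU; if one exists the new
 * queue is dropped and the existing one returned instead.
 */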
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		unsigned int hash, void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&f->lock);
#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * promoted read lock to write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + f->ctl->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &f->lru_list);
	f->nqueues++;
	write_unlock(&f->lock);

	return qp;
}
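
/*
 * Allocate a zeroed queue descriptor and let the protocol's
 * constructor callback fill in the protocol-specific part.
 */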
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &f->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}
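
/* Allocate a new queue and immediately make it visible in the hash. */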
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg, unsigned int hash)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, hash, arg);
}
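
/*
 * Look up (or create) the queue a fragment belongs to and return it
 * with a reference held.  The caller supplies the hash value for the
 * lookup key.
 */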
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	read_lock(&f->lock);
	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key, hash);
}
EXPORT_SYMBOL(inet_frag_find);