/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

struct dm_bio_prison_cell {
	struct hlist_node list;
	struct dm_bio_prison *prison;
	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;

	unsigned nr_buckets;
	uint32_t hash_mask;
	struct hlist_head *cells;
};

/*----------------------------------------------------------------*/

static uint32_t calc_nr_buckets(unsigned nr_cells)
{
	uint32_t n = 128;

	nr_cells /= 4;
	nr_cells = min(nr_cells, 8192u);

	while (n < nr_cells)
		n <<= 1;

	return n;
}

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
	unsigned i;
	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
	size_t len = sizeof(struct dm_bio_prison) +
		(sizeof(struct hlist_head) * nr_buckets);
	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);
	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->nr_buckets = nr_buckets;
	prison->hash_mask = nr_buckets - 1;
	prison->cells = (struct hlist_head *) (prison + 1);
	for (i = 0; i < nr_buckets; i++)
		INIT_HLIST_HEAD(prison->cells + i);

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

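/*
 * Illustrative sketch, not part of the original file: the typical prison
 * lifecycle from a target's constructor and destructor.  The example_*
 * name and the cell count of 1024 are hypothetical.
 */
static void __maybe_unused example_prison_lifecycle(void)
{
	struct dm_bio_prison *prison;

	/* size the pool for cells held *concurrently*, not distinct keys */
	prison = dm_bio_prison_create(1024);
	if (!prison)
		return;	/* a real constructor would fail with -ENOMEM */

	/* ... detain and release bios while the target is live ... */

	dm_bio_prison_destroy(prison);
}
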
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
	const unsigned long BIG_PRIME = 4294967291UL;
	uint64_t hash = key->block * BIG_PRIME;

	return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
	return (lhs->virtual == rhs->virtual) &&
		(lhs->dev == rhs->dev) &&
		(lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
						  struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *cell;

	hlist_for_each_entry(cell, bucket, list)
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}

/*
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
		  struct bio *inmate, struct dm_bio_prison_cell **ref)
{
	int r = 1;
	unsigned long flags;
	uint32_t hash = hash_key(prison, key);
	struct dm_bio_prison_cell *cell, *cell2;

	BUG_ON(hash > prison->nr_buckets);

	spin_lock_irqsave(&prison->lock, flags);

	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/* Allocate a new cell; we must drop the lock to do so. */
	spin_unlock_irqrestore(&prison->lock, flags);
	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
	spin_lock_irqsave(&prison->lock, flags);

	/*
	 * We've been unlocked, so we have to double check that
	 * nobody else has inserted this cell in the meantime.
	 */
	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		mempool_free(cell2, prison->cell_pool);
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/* Use the new cell. */
	cell = cell2;
	cell->prison = prison;
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = inmate;
	bio_list_init(&cell->bios);
	hlist_add_head(&cell->list, prison->cells + hash);
	r = 0;

out:
	spin_unlock_irqrestore(&prison->lock, flags);

	*ref = cell;

	return r;
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

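/*
 * Illustrative sketch, not part of the original file: serialising bios
 * against one block.  The first bio to detain a key becomes the holder
 * and proceeds; later bios for the same key queue inside the cell until
 * the holder releases it.  example_detain() is a hypothetical name.
 */
static void __maybe_unused example_detain(struct dm_bio_prison *prison,
					  struct dm_cell_key *key,
					  struct bio *bio)
{
	struct dm_bio_prison_cell *cell;

	if (dm_bio_detain(prison, key, bio, &cell))
		return;	/* already held; bio is now queued in the cell */

	/* we are the holder: safe to start the guarded I/O here */
}
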
/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);

	if (inmates) {
		bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}

	mempool_free(cell, prison->cell_pool);
}

void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);

	mempool_free(cell, prison->cell_pool);
}

void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison_cell *cell)
{
	struct dm_bio_prison *prison = cell->prison;
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, &bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}
EXPORT_SYMBOL_GPL(dm_cell_error);

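/*
 * Illustrative sketch, not part of the original file: once the guarded
 * operation finishes, release the cell and resubmit everything queued
 * behind the holder; if the operation failed, dm_cell_error() fails the
 * holder and the queued bios instead.  example_release() is hypothetical.
 */
static void __maybe_unused example_release(struct dm_bio_prison_cell *cell,
					   int err)
{
	struct bio_list bios;
	struct bio *bio;

	if (err) {
		dm_cell_error(cell);
		return;
	}

	bio_list_init(&bios);	/* dm_cell_release() needs an initialised list */
	dm_cell_release(cell, &bios);
	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);	/* or requeue for deferred handling */
}
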
/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if there were no pending
 * items to delay the job.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

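/*
 * Illustrative sketch, not part of the original file: using a deferred
 * set to hold back a job (e.g. a metadata commit) until all earlier
 * in-flight I/O has drained.  The example_* names are hypothetical.
 */
static void __maybe_unused example_io_issue(struct dm_deferred_set *ds,
					    struct dm_deferred_entry **entry)
{
	/* issue path: join the current entry before starting the I/O */
	*entry = dm_deferred_entry_inc(ds);
}

static void __maybe_unused example_io_complete(struct dm_deferred_entry *entry)
{
	LIST_HEAD(work);

	/* completion path: leave the set and collect jobs now unblocked */
	dm_deferred_entry_dec(entry, &work);
	/* ... run each job on 'work' ... */
}

static void __maybe_unused example_commit(struct dm_deferred_set *ds,
					  struct list_head *job)
{
	/* returns 0 when nothing is in flight, so run the job immediately */
	if (!dm_deferred_set_add_work(ds, job)) {
		/* ... run the job now ... */
	}
}
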
/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");