/*
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
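/*
 * Rough usage sketch (illustrative only; "cache", "bdev", "block" and
 * "hash" are hypothetical caller-side names, not part of this file):
 *
 *	cache = mb_cache_create("example_cache", 6);
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);        (unique block index)
 *	ce = mb_cache_entry_find_first(cache, bdev, hash);  (additional index)
 *
 * Both calls return a handle; the entry stays off the lru list until the
 * handle is dropped again with mb_cache_entry_release().
 */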
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
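/*
 * Note on the handle counting below (an interpretation of the code in this
 * file, not an authoritative spec): e_used counts handles. A reader obtained
 * via mb_cache_entry_find_*() adds 1; an exclusive holder
 * (mb_cache_entry_alloc()/mb_cache_entry_get()) adds 1 + MB_CACHE_WRITER,
 * so e.g. one writer gives e_used = 0x8000 while three readers give
 * e_used = 3. Readers wait while e_used >= MB_CACHE_WRITER; a writer waits
 * until e_used drops to 0.
 */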
static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
struct mb_cache {
	struct list_head		c_cache_list;
	const char			*c_name;
	atomic_t			c_entry_count;
	int				c_bucket_bits;
	struct kmem_cache		*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_index_hash;
};
/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
/*
 * What the mbcache registers as to get shrunk dynamically.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}

static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}
static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}
/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return (count / 100) * sysctl_vfs_cache_pressure;
}
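/*
 * Worked example of the return value above (assumed numbers): with 5000
 * cached entries and the default sysctl_vfs_cache_pressure of 100, the
 * shrinker reports (5000 / 100) * 100 = 5000 freeable objects; raising
 * vfs_cache_pressure to 200 doubles that to 10000, so mbcache entries are
 * reclaimed more aggressively relative to other caches.
 */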
/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
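/*
 * Creation/teardown sketch (hypothetical caller; the name and bucket_bits
 * value are only examples):
 *
 *	static struct mb_cache *example_cache;
 *
 *	example_cache = mb_cache_create("example_xattr", 6);   (64 buckets)
 *	if (!example_cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(example_cache);
 */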
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. Cache entries
 * currently in use cannot be freed, and thus remain in the cache. All
 * others are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
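/*
 * Sketch (hypothetical caller): a filesystem that is about to stop using
 * a device can drop all of that device's unused entries first:
 *
 *	static void example_put_super(struct super_block *sb)
 *	{
 *		mb_cache_shrink(sb->s_bdev);
 *		...
 *	}
 *
 * Entries still held via handles survive the call and are only freed once
 * their last handle is released.
 */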
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}
/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (ce) {
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_used = 1 + MB_CACHE_WRITER;
		ce->e_queued = 0;
	}
	return ce;
}
/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
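/*
 * Sketch of the intended alloc/insert/release sequence (illustrative
 * caller; "hash" stands for whatever additional-index key is computed
 * from the block's contents):
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (!ce)
 *		return -ENOMEM;
 *	error = mb_cache_entry_insert(ce, bdev, block, hash);
 *	if (error)
 *		mb_cache_entry_free(ce);	(-EBUSY: already cached)
 *	else
 *		mb_cache_entry_release(ce);	(leave it in the cache)
 */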
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}
/*
 * mb_cache_entry_free()
 *
 * Frees a cache entry. The entry is unhashed (made invalid) and its handle
 * released, so the entry is destroyed as soon as the last handle to it is
 * given up.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}
/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
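/*
 * Invalidation sketch (hypothetical caller): when a cached block is being
 * freed or reused, drop its entry from the cache:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */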
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}
/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
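/*
 * Fuller reader-loop sketch (illustrative; "entry_matches()" is a
 * hypothetical predicate the caller applies to each candidate, and callers
 * are expected to retry when ERR_PTR(-EAGAIN) is returned):
 *
 *	ce = mb_cache_entry_find_first(cache, bdev, hash);
 *	while (ce && !IS_ERR(ce)) {
 *		if (entry_matches(ce)) {
 *			... use ce->e_bdev / ce->e_block ...
 *			mb_cache_entry_release(ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(ce, bdev, hash);
 *	}
 */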
static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)