/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
#define CUTOFF_WRITEBACK	50
#define CUTOFF_WRITEBACK_SYNC	75
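/*
 * These cutoffs are percentages of cache occupancy (c->gc_stats.in_use, as
 * checked in check_should_skip() and should_writeback() below): past
 * CUTOFF_CACHE_ADD we stop adding data to the cache, past CUTOFF_CACHE_READA
 * we stop doing readahead into the cache, and past CUTOFF_WRITEBACK (or
 * CUTOFF_WRITEBACK_SYNC for REQ_SYNC writes) new writes fall back from
 * writeback to writethrough.
 */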
struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);
/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif /* CONFIG_CGROUP_BCACHE */
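/*
 * A cache_mode of -1 (the default, set by init_bch_cgroup() above) means the
 * cgroup doesn't override anything and IO falls back to the backing device's
 * mode - note how cache_mode() below only honours the cgroup's value when
 * it's non-negative.
 */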
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
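/*
 * The checksum is stashed in the key's first unused pointer slot
 * (k->ptr[KEY_PTRS(k)] above); the top bit is masked off, presumably so the
 * slot can't be mistaken for a valid pointer.
 */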
/* Insert data into cache */

static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_journal, bcache_wq);
}
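/*
 * Note the 1U << 14 above: each invalidation key covers at most 2^14 sectors
 * (8 MiB with 512 byte sectors), presumably to stay well within the key's
 * size field; larger regions are simply split across several keys.
 */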
struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
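/* Called from bch_alloc_sectors() below, with c->data_bucket_lock held. */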
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
/*
 * Allocates some space in the cache to write to, and updates k to point to the
 * newly allocated space; KEY_SIZE(k) and KEY_OFFSET(k) are set to point to the
 * end of the newly allocated space.
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call find_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}
static void bch_insert_data_endio(struct bio *bio, int error,
				  struct batch_complete *batch)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		bch_queue_gc(op->c);
	}

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
		if (!n) {
			__bkey_put(op->c, k);
			continue_at(cl, bch_insert_data_loop, bcache_wq);
		}

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		pr_debug("%s", pkey(k));
		bch_keylist_push(&op->keys);

		trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->cache_bio and op->inode.
 */
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}
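/*
 * A minimal sketch of how callers in this file drive it (request_write() and
 * flash_dev_make_request() both follow this shape):
 *
 *	s->op.cache_bio = bio;
 *	closure_call(&s->op.cl, bch_insert_data, NULL, &s->cl);
 *
 * The closure then bounces between bch_insert_data_loop(), bch_journal() and
 * bch_btree_insert_async() until op->insert_data_done is set.
 */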
void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error,
			  struct batch_complete *batch)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
void bch_cache_read_endio(struct bio *bio, int error,
			  struct batch_complete *batch)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}
/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* The cache read failed, but we can retry from the backing
		 * device.
		 */
		pr_debug("recovering at sector %llu",
			 (uint64_t) s->orig_bio->bi_sector);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		trace_bcache_read_retry(&s->bio.bio);
		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
	 * contains data ready to be inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->op.cache_bio) {
		struct bio_vec *src, *dst;
		unsigned src_offset, dst_offset, bytes;
		void *dst_ptr;

		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		src = bio_iovec(s->op.cache_bio);
		dst = bio_iovec(s->cache_miss);
		src_offset = src->bv_offset;
		dst_offset = dst->bv_offset;
		dst_ptr = kmap(dst->bv_page);

		while (1) {
			if (dst_offset == dst->bv_offset + dst->bv_len) {
				kunmap(dst->bv_page);
				dst++;
				if (dst == bio_iovec_idx(s->cache_miss,
						s->cache_miss->bi_vcnt))
					break;

				dst_offset = dst->bv_offset;
				dst_ptr = kmap(dst->bv_page);
			}

			if (src_offset == src->bv_offset + src->bv_len) {
				src++;
				if (src == bio_iovec_idx(s->op.cache_bio,
						 s->op.cache_bio->bi_vcnt))
					BUG();

				src_offset = src->bv_offset;
			}

			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
				    src->bv_offset + src->bv_len - src_offset);

			memcpy(dst_ptr + dst_offset,
			       page_address(src->bv_page) + src_offset,
			       bytes);

			src_offset	+= bytes;
			dst_offset	+= bytes;
		}

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}
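/*
 * Read misses come through here from the btree lookup path, via
 * d->cache_miss. A sketch of the return contract, as inferred from the code
 * below: 0 on success, -EAGAIN when the bio couldn't be split, and -EINTR
 * once bch_btree_insert_check_key() has been called, since at that point the
 * btree iterator that got us here is no longer valid.
 */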
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (!miss)
		return -EAGAIN;

	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	trace_bcache_cache_miss(s->orig_bio);
	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}
/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static bool should_writeback(struct cached_dev *dc, struct bio *bio)
{
	unsigned threshold = (bio->bi_rw & REQ_SYNC)
		? CUTOFF_WRITEBACK_SYNC
		: CUTOFF_WRITEBACK;

	return !atomic_read(&dc->disk.detaching) &&
		cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
		dc->disk.c->gc_stats.in_use < threshold;
}
static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (s->op.skip)
		goto skip;

	if (should_writeback(dc, s->orig_bio))
		s->writeback = true;

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		trace_bcache_writethrough(s->orig_bio);
		closure_bio_submit(bio, cl, s->d);
	} else {
		s->op.cache_bio = bio;
		trace_bcache_writeback(s->orig_bio);
		bch_writeback_add(dc, bio_sectors(bio));
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);
	trace_bcache_write_skip(s->orig_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}
static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */

int bch_get_congested(struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	return i <= 0 ? 1 : fract_exp_two(i, 6);
}
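/*
 * The return value above feeds check_should_skip() below as the sequential
 * cutoff: 0 means "not congested, use dc->sequential_cutoff", anything
 * positive both enables the bypass heuristic and scales with congestion,
 * fract_exp_two() presumably giving a smooth fractional-power-of-two ramp.
 */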
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;

	long rand;
	int cutoff = bch_get_congested(c);
	unsigned mode = cache_mode(dc, bio);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector   & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!cutoff) {
		cutoff = dc->sequential_cutoff >> 9;

		if (!cutoff)
			goto rescale;

		if (mode == CACHE_MODE_WRITEBACK &&
		    (bio->bi_rw & REQ_WRITE) &&
		    (bio->bi_rw & REQ_SYNC))
			goto rescale;
	}

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	rand = get_random_int();
	cutoff -= bitmap_weight(&rand, BITS_PER_LONG);

	if (cutoff <= (int) (max(s->task->sequential_io,
				 s->task->sequential_io_avg) >> 9))
		goto skip;

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}
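/*
 * Worked example for the cutoff comparison above, assuming a 4 MiB
 * dc->sequential_cutoff (the usual default): the >> 9 turns it into 8192
 * sectors, so once a task's tracked sequential_io (or its EWMA) reaches
 * 4 MiB of contiguous IO, further IO in that stream bypasses the cache. The
 * get_random_int()/bitmap_weight() term just dithers the threshold downward
 * by up to BITS_PER_LONG sectors so the edge isn't sharp.
 */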
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */
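/*
 * Flash only volumes live entirely in the cache, so a "cache miss" is just a
 * hole with no data anywhere; flash_dev_cache_miss() below therefore only
 * zero-fills the missing range instead of reading a backing device.
 */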
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	/* Zero fill bio */

	while (bio->bi_idx != bio->bi_vcnt) {
		struct bio_vec *bv = bio_iovec(bio);
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		bv->bv_len	-= j << 9;
		bv->bv_offset	+= j << 9;

		if (bv->bv_len)
			return 0;

		bio->bi_sector	+= j;
		bio->bi_size	-= j << 9;

		bio->bi_idx++;
		sectors		-= j;
	}

	s->op.lookup_done = true;

	return 0;
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
		/* No data - probably a cache flush */
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}