/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}

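/*
 * Read the journal entries in one journal bucket into the in-memory list of
 * journal_replay entries, deduplicating against entries already read from
 * other buckets.  Returns 1 if at least one entry from this bucket was added
 * to the list, 0 if the bucket held nothing useful, and a negative errno on
 * error.
 */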
static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               struct btree_op *op, unsigned bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        unsigned len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        pr_debug("reading %llu", (uint64_t) bucket);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned, left, PAGE_SECTORS * 8);

                bio_reset(bio);
                bio->bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
                bio->bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &op->cl;
                bch_bio_map(bio, data);

                closure_bio_submit(bio, &op->cl, ca);
                closure_sync(&op->cl);

                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(ca->set))
                                return ret;

                        if (bytes > left << 9)
                                return ret;

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j))
                                return ret;

                        blocks = set_blocks(j, ca->set);

                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        list_add(&i->list, where);
                        ret = 1;

                        ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}

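/*
 * Find every valid journal entry on each cache device: probe buckets in
 * golden-ratio-hash order until one with entries turns up, binary search for
 * the most recently written bucket, then read backwards until no more
 * entries are found.
 */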
int bch_journal_read(struct cache_set *c, struct list_head *list,
                     struct btree_op *op)
{
#define read_bucket(b)                                                  \
        ({                                                              \
                int ret = journal_read_bucket(ca, list, op, b);         \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca;
        unsigned iter;

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
                unsigned i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);

                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }

                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");

                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                if (list_empty(list))
                        continue;
bsearch:
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);

                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }

                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;

                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;

                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                ja->cur_idx = ja->discard_idx =
                                        ja->last_idx = i;
                        }
        }

        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

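/*
 * Mark the keys in the journal entries we are about to replay, as garbage
 * collection would: take a pin on each entry in journal.pin and bump the pin
 * count on every bucket the journalled keys point to, so nothing gets reused
 * before the keys are safely back in the btree.
 */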
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }

                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        unsigned j;

                        for (j = 0; j < KEY_PTRS(k); j++) {
                                struct bucket *g = PTR_BUCKET(c, k, j);
                                atomic_inc(&g->pin);

                                if (g->prio == BTREE_PRIO &&
                                    !ptr_stale(c, k, j))
                                        g->prio = INITIAL_PRIO;
                        }

                        __bch_btree_mark_key(c, 0, k);
                }
        }
}

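/*
 * Replay the journal: reinsert each journalled key into the btree, in
 * exactly the order the keys appear in the journal, dropping each entry's
 * journal pin once it has been replayed.
 */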
int bch_journal_replay(struct cache_set *s, struct list_head *list,
                       struct btree_op *op)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                if (n != i->j.seq)
                        pr_err(
                "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
                n, i->j.seq - 1, start, end);

                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bkey_copy(op->keys.top, k);
                        bch_keylist_push(&op->keys);

                        op->journal = i->pin;
                        atomic_inc(op->journal);

                        ret = bch_btree_insert(op, s);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&op->keys));
                        keys++;
                }

                if (i->pin)
                        atomic_dec(i->pin);

                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);

        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }
err:
        closure_sync(&op->cl);
        return ret;
}

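/* Journalling */

/*
 * Write out the dirty btree node that holds the oldest journal pin, so the
 * journal entry (and bucket) it keeps alive can be reclaimed.
 */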
static void btree_flush_write(struct cache_set *c)
{
        /*
         * Try to find the btree node that references the oldest journal
         * entry; best is our current candidate, and is locked if non NULL:
         */
        struct btree *b, *best = NULL;
        unsigned iter;

        for_each_cached_btree(b, c, iter) {
                if (!down_write_trylock(&b->lock))
                        continue;

                if (!btree_node_dirty(b) ||
                    !btree_current_write(b)->journal) {
                        rw_unlock(true, b);
                        continue;
                }

                if (!best)
                        best = b;
                else if (journal_pin_cmp(c,
                                         btree_current_write(best),
                                         btree_current_write(b))) {
                        rw_unlock(true, best);
                        best = b;
                } else
                        rw_unlock(true, b);
        }

        if (best)
                goto out;

        /* We can't find the best btree node, just pick the first */
        list_for_each_entry(b, &c->btree_cache, list)
                if (!b->level && btree_node_dirty(b)) {
                        best = b;
                        rw_lock(true, best, best->level);
                        goto found;
                }

out:
        if (!best)
                return;
found:
        if (btree_node_dirty(best))
                bch_btree_node_write(best, NULL);
        rw_unlock(true, best);
}

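/*
 * Sequence number of the oldest journal entry still tracked in the pin
 * fifo; there is one fifo entry per journal entry, newest at the back.
 */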
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(0, &ja->discard_bio);
}

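/*
 * Advance ja->discard_idx towards ja->last_idx, issuing an asynchronous
 * discard for each journal bucket that no longer holds live entries; the
 * discard_in_flight state machine keeps at most one discard per device
 * outstanding at a time.
 */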
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio);
                bio->bi_sector          = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
                bio->bi_size            = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                schedule_work(&ja->discard_work);
        }
}

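/*
 * Free up journal space: drop pins whose refcount has hit zero, advance each
 * device's last_idx past buckets that only contain flushed entries, kick off
 * discards for them, and if the open journal entry is out of space pick the
 * next bucket on each device for c->journal.key.
 */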
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned iter, n = 0;
        atomic_t p;

        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }

        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        /*
         * Allocate:
         * XXX: Sort by free journal space
         */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }

        bkey_init(k);
        SET_KEY_PTRS(k, n);

        if (n)
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

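/*
 * Switch to the other journal_write buffer and open a new journal entry:
 * push a fresh pin, bump the sequence number and reset the key count.
 */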
void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(error, w->c, "journal io error");
        closure_put(&w->c->journal.io.cl);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io.cl);
        struct cache_set *c = container_of(j, struct cache_set, journal);

        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);

        if (c->journal_delay_ms)
                closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));

        continue_at(cl, journal_write, system_wq);
}

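/*
 * Build and submit the currently open journal entry.  Called with
 * c->journal.lock held; drops the lock before returning.
 */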
static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);

        if (!w->need_write) {
                /*
                 * XXX: have to unlock closure before we unlock journal lock,
                 * else we race with bch_journal(). But this way we race
                 * against cache set unregister. Doh.
                 */
                set_closure_fn(cl, NULL, NULL);
                closure_sub(cl, CLOSURE_RUNNING + 1);
                spin_unlock(&c->journal.lock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, system_wq);
        }

        c->journal.blocks_free -= set_blocks(w->data, c);

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(c);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
                bio->bi_size    = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(bio, cl, c->cache[0]);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

static void __journal_try_write(struct cache_set *c, bool noflush)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io.cl;

        if (!closure_trylock(cl, &c->cl))
                spin_unlock(&c->journal.lock);
        else if (noflush && journal_full(&c->journal)) {
                spin_unlock(&c->journal.lock);
                continue_at(cl, journal_write, system_wq);
        } else
                journal_write_unlocked(cl);
}

#define journal_try_write(c)    __journal_try_write(c, false)

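/*
 * Force a journal write without adding any new keys, so the btree root,
 * uuid and prio bucket pointers in the jset header get persisted; the
 * caller's closure, if given, waits on the write.  A no-op unless the cache
 * set is running in synchronous mode.
 */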
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct journal_write *w;

        if (CACHE_SYNC(&c->sb)) {
                spin_lock(&c->journal.lock);

                w = c->journal.cur;
                w->need_write = true;

                if (cl)
                        BUG_ON(!closure_wait(&w->wait, cl));

                closure_flush(&c->journal.io);
                __journal_try_write(c, true);
        }
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
void bch_journal(struct closure *cl)
{
        struct btree_op *op = container_of(cl, struct btree_op, cl);
        struct cache_set *c = op->c;
        struct journal_write *w;
        size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;

        if (op->type != BTREE_INSERT ||
            !CACHE_SYNC(&c->sb))
                goto out;

        /*
         * If we're looping because we errored, might already be waiting on
         * another journal write:
         */
        while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
                closure_sync(cl->parent);

        spin_lock(&c->journal.lock);

        if (journal_full(&c->journal)) {
                trace_bcache_journal_full(c);

                closure_wait(&c->journal.wait, cl);

                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, bch_journal, bcache_wq);
        }

        w = c->journal.cur;
        w->need_write = true;
        b = __set_blocks(w->data, w->data->keys + n, c);

        if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
            b > c->journal.blocks_free) {
                trace_bcache_journal_entry_full(c);

                /*
                 * XXX: If we were inserting so many keys that they won't fit in
                 * an _empty_ journal write, we'll deadlock. For now, handle
                 * this in bch_keylist_realloc() - but something to think about.
                 */
                BUG_ON(!w->data->keys);

                BUG_ON(!closure_wait(&w->wait, cl));

                closure_flush(&c->journal.io);

                journal_try_write(c);
                continue_at(cl, bch_journal, bcache_wq);
        }

        memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
        w->data->keys += n;

        op->journal = &fifo_back(&c->journal.pin);
        atomic_inc(op->journal);

        if (op->flush_journal) {
                closure_flush(&c->journal.io);
                closure_wait(&w->wait, cl->parent);
        }

        journal_try_write(c);
out:
        bch_btree_insert_async(cl);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

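/*
 * The journal keeps two jset buffers, w[0] and w[1], so one can be filled
 * with new keys while the other is being written out.
 */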
int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        closure_init_unlocked(&j->io);
        spin_lock_init(&j->lock);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}