2 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 #include <linux/kernel.h>
15 #include <linux/wait.h>
16 #include <linux/blkdev.h>
17 #include <linux/slab.h>
18 #include <linux/raid/md_p.h>
19 #include <linux/crc32c.h>
20 #include <linux/random.h>
25 * metadata/data are stored on disk in 4k units (blocks) regardless of the
26 * underlying hardware sector size. Only works with PAGE_SIZE == 4096
28 #define BLOCK_SECTORS (8)
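/*
 * For illustration: one 4k block spans BLOCK_SECTORS = 4096 / 512 = 8
 * sectors, so every log position below advances in steps of 8 sectors.
 */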
31 * reclaim runs every 1/4 of the disk size or when 10G of space is reclaimable.
32 * This prevents recovery from having to scan a very long log
34 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
35 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
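/*
 * Worked example of the reclaim thresholds: RECLAIM_MAX_FREE_SPACE is
 * 10 * 1024 * 1024 * 2 sectors = 10GiB, and r5l_load_log() below uses
 * min(device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT, RECLAIM_MAX_FREE_SPACE),
 * so a 16GiB journal starts reclaim at 4GiB of reclaimable space while a
 * 100GiB journal is capped at the 10GiB limit.
 */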
42 sector_t device_size; /* log device size, round to
44 sector_t max_free_space; /* reclaim runs if free space is at
47 sector_t last_checkpoint; /* log tail. where recovery scan
49 u64 last_cp_seq; /* log tail sequence */
51 sector_t log_start; /* log head. where new data appends */
52 u64 seq; /* log head sequence */
54 sector_t next_checkpoint;
57 struct mutex io_mutex;
58 struct r5l_io_unit *current_io; /* current io_unit accepting new data */
60 spinlock_t io_list_lock;
61 struct list_head running_ios; /* io_units which are still running,
62 * and have not yet been completely
63 * written to the log */
64 struct list_head io_end_ios; /* io_units which have been completely
65 * written to the log but not yet written
67 struct list_head flushing_ios; /* io_units which are waiting for log
69 struct list_head finished_ios; /* io_units which settle down in log disk */
72 struct kmem_cache *io_kc;
74 struct md_thread *reclaim_thread;
75 unsigned long reclaim_target; /* amount of space that needs to be
76 * reclaimed. if it's 0, reclaim spaces
77 * used by io_units which are in
78 * IO_UNIT_STRIPE_END state (i.e. reclaim
79 * doesn't wait for a specific io_unit
80 * to switch to IO_UNIT_STRIPE_END
82 wait_queue_head_t iounit_wait;
84 struct list_head no_space_stripes; /* pending stripes, log has no space */
85 spinlock_t no_space_stripes_lock;
87 bool need_cache_flush;
91 * an IO range starts at a meta data block and ends at the next meta data
92 * block. The io unit's meta data block tracks the data/parity that follow it.
93 * An io unit is written to the log disk with a normal write; since we always
94 * flush the log disk first and only then start moving data to the raid disks,
95 * there is no need to write the io unit with FLUSH/FUA
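/*
 * A sketch of one io_unit's on-disk layout (assuming 4k blocks):
 *
 *   | meta block | data | data | ... | parity |   <- next io_unit's meta ...
 *
 * The meta block holds an r5l_payload_data_parity entry for every
 * data/parity block that follows it, which is what recovery walks.
 */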
100 struct page *meta_page; /* store meta block */
101 int meta_offset; /* current offset in meta_page */
103 struct bio *current_bio;/* current_bio accepting new data */
105 atomic_t pending_stripe;/* how many stripes not flushed to raid */
106 u64 seq; /* seq number of the metablock */
107 sector_t log_start; /* where the io_unit starts */
108 sector_t log_end; /* where the io_unit ends */
109 struct list_head log_sibling; /* log->running_ios */
110 struct list_head stripe_list; /* stripes added to the io_unit */
116 /* r5l_io_unit state */
117 enum r5l_io_unit_state {
118 IO_UNIT_RUNNING = 0, /* accepting new IO */
119 IO_UNIT_IO_START = 1, /* io_unit bio has started writing to the log,
120 * not accepting new bio any more */
121 IO_UNIT_IO_END = 2, /* io_unit bio has finished writing to the log */
122 IO_UNIT_STRIPE_END = 3, /* stripe data has finished writing to raid */
125 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
128 if (start >= log->device_size)
129 start = start - log->device_size;
133 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
139 return end + log->device_size - start;
142 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
146 used_size = r5l_ring_distance(log, log->last_checkpoint,
149 return log->device_size > used_size + size;
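/*
 * Worked example of the ring arithmetic above, for a hypothetical
 * device_size of 1024 sectors:
 *   r5l_ring_add(log, 1016, 8)      -> 0  (wraps at device_size)
 *   r5l_ring_distance(log, 1016, 8) -> 8 + 1024 - 1016 = 16
 * r5l_has_free_space() only succeeds while used_size + size stays strictly
 * below device_size, so the log is never allowed to fill up completely.
 */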
152 static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
154 __free_page(io->meta_page);
155 kmem_cache_free(log->io_kc, io);
158 static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
159 enum r5l_io_unit_state state)
161 struct r5l_io_unit *io;
163 while (!list_empty(from)) {
164 io = list_first_entry(from, struct r5l_io_unit, log_sibling);
165 /* don't change list order */
166 if (io->state >= state)
167 list_move_tail(&io->log_sibling, to);
173 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
174 enum r5l_io_unit_state state)
176 if (WARN_ON(io->state >= state))
181 static void r5l_io_run_stripes(struct r5l_io_unit *io)
183 struct stripe_head *sh, *next;
185 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
186 list_del_init(&sh->log_list);
187 set_bit(STRIPE_HANDLE, &sh->state);
188 raid5_release_stripe(sh);
192 /* XXX: totally ignores I/O errors */
193 static void r5l_log_run_stripes(struct r5l_log *log)
195 struct r5l_io_unit *io, *next;
197 assert_spin_locked(&log->io_list_lock);
199 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
200 /* don't change list order */
201 if (io->state < IO_UNIT_IO_END)
204 list_move_tail(&io->log_sibling, &log->finished_ios);
205 r5l_io_run_stripes(io);
209 static void r5l_log_endio(struct bio *bio)
211 struct r5l_io_unit *io = bio->bi_private;
212 struct r5l_log *log = io->log;
217 spin_lock_irqsave(&log->io_list_lock, flags);
218 __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
219 if (log->need_cache_flush)
220 r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
223 r5l_log_run_stripes(log);
224 spin_unlock_irqrestore(&log->io_list_lock, flags);
226 if (log->need_cache_flush)
227 md_wakeup_thread(log->rdev->mddev->thread);
230 static void r5l_submit_current_io(struct r5l_log *log)
232 struct r5l_io_unit *io = log->current_io;
233 struct r5l_meta_block *block;
240 block = page_address(io->meta_page);
241 block->meta_size = cpu_to_le32(io->meta_offset);
242 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
243 block->checksum = cpu_to_le32(crc);
245 log->current_io = NULL;
246 spin_lock_irqsave(&log->io_list_lock, flags);
247 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
248 spin_unlock_irqrestore(&log->io_list_lock, flags);
250 submit_bio(WRITE, io->current_bio);
253 static struct bio *r5l_bio_alloc(struct r5l_log *log)
255 struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
258 bio->bi_bdev = log->rdev->bdev;
259 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
264 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
266 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
269 * If we filled up the log device, start from the beginning again,
270 * which will require a new bio.
272 * Note: for this to work properly the log size needs to be a multiple
275 if (log->log_start == 0)
276 io->need_split_bio = true;
278 io->log_end = log->log_start;
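/*
 * Example: when the advanced log_start wraps back to 0, need_split_bio is
 * set and the next r5l_append_payload_page() call allocates a fresh bio,
 * chained to the previous one, so the write continues at the start of the
 * device instead of growing the current bio across the wrap.
 */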
281 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
283 struct r5l_io_unit *io;
284 struct r5l_meta_block *block;
286 /* We can't handle memory allocation failure so far */
287 io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
289 INIT_LIST_HEAD(&io->log_sibling);
290 INIT_LIST_HEAD(&io->stripe_list);
291 io->state = IO_UNIT_RUNNING;
293 io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO);
294 block = page_address(io->meta_page);
295 block->magic = cpu_to_le32(R5LOG_MAGIC);
296 block->version = R5LOG_VERSION;
297 block->seq = cpu_to_le64(log->seq);
298 block->position = cpu_to_le64(log->log_start);
300 io->log_start = log->log_start;
301 io->meta_offset = sizeof(struct r5l_meta_block);
302 io->seq = log->seq++;
304 io->current_bio = r5l_bio_alloc(log);
305 io->current_bio->bi_end_io = r5l_log_endio;
306 io->current_bio->bi_private = io;
307 bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
309 r5_reserve_log_entry(log, io);
311 spin_lock_irq(&log->io_list_lock);
312 list_add_tail(&io->log_sibling, &log->running_ios);
313 spin_unlock_irq(&log->io_list_lock);
318 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
320 if (log->current_io &&
321 log->current_io->meta_offset + payload_size > PAGE_SIZE)
322 r5l_submit_current_io(log);
324 if (!log->current_io)
325 log->current_io = r5l_new_meta(log);
329 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
331 u32 checksum1, u32 checksum2,
332 bool checksum2_valid)
334 struct r5l_io_unit *io = log->current_io;
335 struct r5l_payload_data_parity *payload;
337 payload = page_address(io->meta_page) + io->meta_offset;
338 payload->header.type = cpu_to_le16(type);
339 payload->header.flags = cpu_to_le16(0);
340 payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
342 payload->location = cpu_to_le64(location);
343 payload->checksum[0] = cpu_to_le32(checksum1);
345 payload->checksum[1] = cpu_to_le32(checksum2);
347 io->meta_offset += sizeof(struct r5l_payload_data_parity) +
348 sizeof(__le32) * (1 + !!checksum2_valid);
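/*
 * Worked example (PAGE_SIZE == 4096, so PAGE_SHIFT - 9 == 3): a data
 * payload gets size = 1 << 3 = 8 sectors and advances meta_offset by
 * sizeof(struct r5l_payload_data_parity) + 4 bytes; a RAID6 parity
 * payload with checksum2_valid gets size = 16 sectors and two checksums,
 * advancing meta_offset by the header plus 8 bytes.
 */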
351 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
353 struct r5l_io_unit *io = log->current_io;
355 if (io->need_split_bio) {
356 struct bio *prev = io->current_bio;
358 io->current_bio = r5l_bio_alloc(log);
359 bio_chain(io->current_bio, prev);
361 submit_bio(WRITE, prev);
364 if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
367 r5_reserve_log_entry(log, io);
370 static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
371 int data_pages, int parity_pages)
375 struct r5l_io_unit *io;
378 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
380 sizeof(struct r5l_payload_data_parity) +
381 sizeof(__le32) * parity_pages;
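/*
 * Worked example of meta_size: every data page costs one payload header
 * plus a 4-byte checksum, while all parity pages share a single header
 * with one 4-byte checksum per parity page, so typical stripe widths fit
 * easily in the single-page meta block managed by r5l_get_meta() below.
 */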
383 r5l_get_meta(log, meta_size);
384 io = log->current_io;
386 for (i = 0; i < sh->disks; i++) {
387 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
389 if (i == sh->pd_idx || i == sh->qd_idx)
391 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
392 raid5_compute_blocknr(sh, i, 0),
393 sh->dev[i].log_checksum, 0, false);
394 r5l_append_payload_page(log, sh->dev[i].page);
397 if (sh->qd_idx >= 0) {
398 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
399 sh->sector, sh->dev[sh->pd_idx].log_checksum,
400 sh->dev[sh->qd_idx].log_checksum, true);
401 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
402 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
404 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
405 sh->sector, sh->dev[sh->pd_idx].log_checksum,
407 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
410 list_add_tail(&sh->log_list, &io->stripe_list);
411 atomic_inc(&io->pending_stripe);
415 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
417 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
418 * data from log to raid disks), so we shouldn't wait for reclaim here
420 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
423 int data_pages, parity_pages;
430 /* Don't support stripe batch */
431 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
432 test_bit(STRIPE_SYNCING, &sh->state)) {
433 /* the stripe is written to log, we start writing it to raid */
434 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
438 for (i = 0; i < sh->disks; i++) {
441 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
444 /* checksum is already calculated in the last run */
445 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
447 addr = kmap_atomic(sh->dev[i].page);
448 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
452 parity_pages = 1 + !!(sh->qd_idx >= 0);
453 data_pages = write_disks - parity_pages;
456 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
458 sizeof(struct r5l_payload_data_parity) +
459 sizeof(__le32) * parity_pages;
460 /* Doesn't work with very big raid array */
461 if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
464 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
466 * The stripe must enter the state machine again to finish the write, so
469 clear_bit(STRIPE_DELAYED, &sh->state);
470 atomic_inc(&sh->count);
472 mutex_lock(&log->io_mutex);
474 reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
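/*
 * Example: a RAID6 stripe with four data devices and both parity devices
 * dirty has write_disks == 6, so reserve = (1 + 6) << 3 = 56 sectors:
 * one block for the meta page plus one block per written page.
 */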
475 if (r5l_has_free_space(log, reserve))
476 r5l_log_stripe(log, sh, data_pages, parity_pages);
478 spin_lock(&log->no_space_stripes_lock);
479 list_add_tail(&sh->log_list, &log->no_space_stripes);
480 spin_unlock(&log->no_space_stripes_lock);
482 r5l_wake_reclaim(log, reserve);
484 mutex_unlock(&log->io_mutex);
489 void r5l_write_stripe_run(struct r5l_log *log)
493 mutex_lock(&log->io_mutex);
494 r5l_submit_current_io(log);
495 mutex_unlock(&log->io_mutex);
498 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
503 * we flush log disk cache first, then write stripe data to raid disks.
504 * So if bio is finished, the log disk cache is flushed already. The
505 * recovery guarantees we can recover the bio from the log disk, so we
506 * don't need to flush again
508 if (bio->bi_iter.bi_size == 0) {
512 bio->bi_rw &= ~REQ_FLUSH;
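/*
 * Example: an empty flush bio (bi_size == 0) needs no further work here
 * because of the write-through scheme described above, while a flush bio
 * that also carries data merely has REQ_FLUSH stripped and is then
 * handled as an ordinary write.
 */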
516 /* This will run after log space is reclaimed */
517 static void r5l_run_no_space_stripes(struct r5l_log *log)
519 struct stripe_head *sh;
521 spin_lock(&log->no_space_stripes_lock);
522 while (!list_empty(&log->no_space_stripes)) {
523 sh = list_first_entry(&log->no_space_stripes,
524 struct stripe_head, log_list);
525 list_del_init(&sh->log_list);
526 set_bit(STRIPE_HANDLE, &sh->state);
527 raid5_release_stripe(sh);
529 spin_unlock(&log->no_space_stripes_lock);
532 static sector_t r5l_reclaimable_space(struct r5l_log *log)
534 return r5l_ring_distance(log, log->last_checkpoint,
535 log->next_checkpoint);
538 static bool r5l_complete_finished_ios(struct r5l_log *log)
540 struct r5l_io_unit *io, *next;
543 assert_spin_locked(&log->io_list_lock);
545 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
546 /* don't change list order */
547 if (io->state < IO_UNIT_STRIPE_END)
550 log->next_checkpoint = io->log_start;
551 log->next_cp_seq = io->seq;
553 list_del(&io->log_sibling);
554 r5l_free_io_unit(log, io);
562 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
564 struct r5l_log *log = io->log;
567 spin_lock_irqsave(&log->io_list_lock, flags);
568 __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
570 if (!r5l_complete_finished_ios(log)) {
571 spin_unlock_irqrestore(&log->io_list_lock, flags);
575 if (r5l_reclaimable_space(log) > log->max_free_space)
576 r5l_wake_reclaim(log, 0);
578 spin_unlock_irqrestore(&log->io_list_lock, flags);
579 wake_up(&log->iounit_wait);
582 void r5l_stripe_write_finished(struct stripe_head *sh)
584 struct r5l_io_unit *io;
589 if (io && atomic_dec_and_test(&io->pending_stripe))
590 __r5l_stripe_write_finished(io);
593 static void r5l_log_flush_endio(struct bio *bio)
595 struct r5l_log *log = container_of(bio, struct r5l_log,
598 struct r5l_io_unit *io;
600 spin_lock_irqsave(&log->io_list_lock, flags);
601 list_for_each_entry(io, &log->flushing_ios, log_sibling)
602 r5l_io_run_stripes(io);
603 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
604 spin_unlock_irqrestore(&log->io_list_lock, flags);
608 * Starting to dispatch IO to raid.
609 * A log consists of io_units, each headed by a meta block. There is one
610 * situation we want to avoid: a broken meta block in the middle of the log
611 * keeps recovery from finding any meta block after it. So if an operation
612 * requires the meta block at the log head to be persistent, every meta
614 * block before it must be persistent in the log too. A case is:
615 * stripe data/parity is in the log and we start writing the stripe to the
617 * raid disks; that data/parity must be persistent in the log before we do
618 * the write to the raid disks. The solution is that we strictly maintain
619 * io_unit list order: we only write the stripes of an io_unit to the raid
 * disks once it is the first io_unit whose data/parity is in the log.
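/*
 * Example of the ordering rule: if io_unit A was submitted before io_unit
 * B and only B has hit the log so far, neither A's nor B's stripes are
 * dispatched to the raid disks; both r5l_log_run_stripes() and
 * r5l_move_io_unit_list() therefore stop at the first io_unit that has
 * not yet reached the required state instead of skipping past it.
 */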
621 void r5l_flush_stripe_to_raid(struct r5l_log *log)
625 if (!log || !log->need_cache_flush)
628 spin_lock_irq(&log->io_list_lock);
629 /* flush bio is running */
630 if (!list_empty(&log->flushing_ios)) {
631 spin_unlock_irq(&log->io_list_lock);
634 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
635 do_flush = !list_empty(&log->flushing_ios);
636 spin_unlock_irq(&log->io_list_lock);
640 bio_reset(&log->flush_bio);
641 log->flush_bio.bi_bdev = log->rdev->bdev;
642 log->flush_bio.bi_end_io = r5l_log_flush_endio;
643 submit_bio(WRITE_FLUSH, &log->flush_bio);
646 static void r5l_write_super(struct r5l_log *log, sector_t cp);
647 static void r5l_do_reclaim(struct r5l_log *log)
649 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
650 sector_t reclaimable;
651 sector_t next_checkpoint;
654 spin_lock_irq(&log->io_list_lock);
656 * move proper io_units to the reclaim list. We should not change the order;
657 * reclaimable and unreclaimable io_units can be mixed in the list, and we
658 * shouldn't reuse the space of an unreclaimable io_unit
661 reclaimable = r5l_reclaimable_space(log);
662 if (reclaimable >= reclaim_target ||
663 (list_empty(&log->running_ios) &&
664 list_empty(&log->io_end_ios) &&
665 list_empty(&log->flushing_ios) &&
666 list_empty(&log->finished_ios)))
669 md_wakeup_thread(log->rdev->mddev->thread);
670 wait_event_lock_irq(log->iounit_wait,
671 r5l_reclaimable_space(log) > reclaimable,
675 next_checkpoint = log->next_checkpoint;
676 next_cp_seq = log->next_cp_seq;
677 spin_unlock_irq(&log->io_list_lock);
679 BUG_ON(reclaimable < 0);
680 if (reclaimable == 0)
684 * write_super will flush cache of each raid disk. We must write super
685 * here, because the log area might be reused soon and we don't want to
688 r5l_write_super(log, next_checkpoint);
690 mutex_lock(&log->io_mutex);
691 log->last_checkpoint = next_checkpoint;
692 log->last_cp_seq = next_cp_seq;
693 mutex_unlock(&log->io_mutex);
695 r5l_run_no_space_stripes(log);
698 static void r5l_reclaim_thread(struct md_thread *thread)
700 struct mddev *mddev = thread->mddev;
701 struct r5conf *conf = mddev->private;
702 struct r5l_log *log = conf->log;
709 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
711 unsigned long target;
712 unsigned long new = (unsigned long)space; /* overflow in theory */
715 target = log->reclaim_target;
718 } while (cmpxchg(&log->reclaim_target, target, new) != target);
719 md_wakeup_thread(log->reclaim_thread);
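/*
 * Usage examples of r5l_wake_reclaim(): __r5l_stripe_write_finished()
 * passes 0 to free whatever is already reclaimable, r5l_write_stripe()
 * passes the space it failed to reserve, and r5l_quiesce() passes -1L to
 * drain everything; the cmpxchg() loop above lets these callers update
 * reclaim_target concurrently without taking a lock.
 */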
722 void r5l_quiesce(struct r5l_log *log, int state)
724 if (!log || state == 2)
727 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
728 log->rdev->mddev, "reclaim");
729 } else if (state == 1) {
731 * at this point all stripes are finished, so io_unit is at
732 * least in STRIPE_END state
734 r5l_wake_reclaim(log, -1L);
735 md_unregister_thread(&log->reclaim_thread);
740 struct r5l_recovery_ctx {
741 struct page *meta_page; /* current meta */
742 sector_t meta_total_blocks; /* total size of current meta and data */
743 sector_t pos; /* recovery position */
744 u64 seq; /* recovery position seq */
747 static int r5l_read_meta_block(struct r5l_log *log,
748 struct r5l_recovery_ctx *ctx)
750 struct page *page = ctx->meta_page;
751 struct r5l_meta_block *mb;
754 if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
757 mb = page_address(page);
758 stored_crc = le32_to_cpu(mb->checksum);
761 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
762 le64_to_cpu(mb->seq) != ctx->seq ||
763 mb->version != R5LOG_VERSION ||
764 le64_to_cpu(mb->position) != ctx->pos)
767 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
768 if (stored_crc != crc)
771 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
774 ctx->meta_total_blocks = BLOCK_SECTORS;
779 static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
780 struct r5l_recovery_ctx *ctx,
781 sector_t stripe_sect,
782 int *offset, sector_t *log_offset)
784 struct r5conf *conf = log->rdev->mddev->private;
785 struct stripe_head *sh;
786 struct r5l_payload_data_parity *payload;
789 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
791 payload = page_address(ctx->meta_page) + *offset;
793 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
794 raid5_compute_sector(conf,
795 le64_to_cpu(payload->location), 0,
798 sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
799 sh->dev[disk_index].page, READ, false);
800 sh->dev[disk_index].log_checksum =
801 le32_to_cpu(payload->checksum[0]);
802 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
803 ctx->meta_total_blocks += BLOCK_SECTORS;
805 disk_index = sh->pd_idx;
806 sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
807 sh->dev[disk_index].page, READ, false);
808 sh->dev[disk_index].log_checksum =
809 le32_to_cpu(payload->checksum[0]);
810 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
812 if (sh->qd_idx >= 0) {
813 disk_index = sh->qd_idx;
814 sync_page_io(log->rdev,
815 r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
816 PAGE_SIZE, sh->dev[disk_index].page,
818 sh->dev[disk_index].log_checksum =
819 le32_to_cpu(payload->checksum[1]);
820 set_bit(R5_Wantwrite,
821 &sh->dev[disk_index].flags);
823 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
826 *log_offset = r5l_ring_add(log, *log_offset,
827 le32_to_cpu(payload->size));
828 *offset += sizeof(struct r5l_payload_data_parity) +
830 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
831 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
835 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
839 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
841 addr = kmap_atomic(sh->dev[disk_index].page);
842 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
844 if (checksum != sh->dev[disk_index].log_checksum)
848 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
849 struct md_rdev *rdev, *rrdev;
851 if (!test_and_clear_bit(R5_Wantwrite,
852 &sh->dev[disk_index].flags))
855 /* in case device is broken */
856 rdev = rcu_dereference(conf->disks[disk_index].rdev);
858 sync_page_io(rdev, stripe_sect, PAGE_SIZE,
859 sh->dev[disk_index].page, WRITE, false);
860 rrdev = rcu_dereference(conf->disks[disk_index].replacement);
862 sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
863 sh->dev[disk_index].page, WRITE, false);
865 raid5_release_stripe(sh);
869 for (disk_index = 0; disk_index < sh->disks; disk_index++)
870 sh->dev[disk_index].flags = 0;
871 raid5_release_stripe(sh);
875 static int r5l_recovery_flush_one_meta(struct r5l_log *log,
876 struct r5l_recovery_ctx *ctx)
878 struct r5conf *conf = log->rdev->mddev->private;
879 struct r5l_payload_data_parity *payload;
880 struct r5l_meta_block *mb;
883 sector_t stripe_sector;
885 mb = page_address(ctx->meta_page);
886 offset = sizeof(struct r5l_meta_block);
887 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
889 while (offset < le32_to_cpu(mb->meta_size)) {
892 payload = (void *)mb + offset;
893 stripe_sector = raid5_compute_sector(conf,
894 le64_to_cpu(payload->location), 0, &dd, NULL);
895 if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
896 &offset, &log_offset))
902 /* copy data/parity from log to raid disks */
903 static void r5l_recovery_flush_log(struct r5l_log *log,
904 struct r5l_recovery_ctx *ctx)
907 if (r5l_read_meta_block(log, ctx))
909 if (r5l_recovery_flush_one_meta(log, ctx))
912 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
916 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
920 struct r5l_meta_block *mb;
923 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
926 mb = page_address(page);
927 mb->magic = cpu_to_le32(R5LOG_MAGIC);
928 mb->version = R5LOG_VERSION;
929 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
930 mb->seq = cpu_to_le64(seq);
931 mb->position = cpu_to_le64(pos);
932 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
933 mb->checksum = cpu_to_le32(crc);
935 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
943 static int r5l_recovery_log(struct r5l_log *log)
945 struct r5l_recovery_ctx ctx;
947 ctx.pos = log->last_checkpoint;
948 ctx.seq = log->last_cp_seq;
949 ctx.meta_page = alloc_page(GFP_KERNEL);
953 r5l_recovery_flush_log(log, &ctx);
954 __free_page(ctx.meta_page);
957 * we did a recovery. Now ctx.pos points to an invalid meta block. The new
958 * log will start here, but we can't let the superblock point to the last
959 * valid meta block. The log might look like:
960 * | meta 1 | meta 2 | meta 3 |
961 * meta 1 is valid, meta 2 is invalid and meta 3 could still look valid. If
962 * the superblock points to meta 1 and we later write a new valid meta 2n,
963 * then after another crash recovery will start from meta 1 again; since
964 * meta 2n is valid now, it will wrongly treat meta 3 as valid as well.
965 * The solution is to write a new meta block at meta 2's position with
966 * seq == meta 1's seq + 10 and let the superblock point to it. Recovery
967 * will then not treat meta 3 as valid, because its seq doesn't match.
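/*
 * Concrete example: if recovery ends with ctx.seq == last_cp_seq + 3,
 * the empty meta block written below at ctx.pos gets seq == ctx.seq + 10
 * and log->seq becomes ctx.seq + 11, so a stale "meta 3" left behind in
 * the log can never present a matching sequence number.
 */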
969 if (ctx.seq > log->last_cp_seq + 1) {
972 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
975 log->seq = ctx.seq + 11;
976 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
977 r5l_write_super(log, ctx.pos);
979 log->log_start = ctx.pos;
985 static void r5l_write_super(struct r5l_log *log, sector_t cp)
987 struct mddev *mddev = log->rdev->mddev;
989 log->rdev->journal_tail = cp;
990 set_bit(MD_CHANGE_DEVS, &mddev->flags);
993 static int r5l_load_log(struct r5l_log *log)
995 struct md_rdev *rdev = log->rdev;
997 struct r5l_meta_block *mb;
998 sector_t cp = log->rdev->journal_tail;
999 u32 stored_crc, expected_crc;
1000 bool create_super = false;
1003 /* Make sure it's valid */
1004 if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
1006 page = alloc_page(GFP_KERNEL);
1010 if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
1014 mb = page_address(page);
1016 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1017 mb->version != R5LOG_VERSION) {
1018 create_super = true;
1021 stored_crc = le32_to_cpu(mb->checksum);
1023 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1024 if (stored_crc != expected_crc) {
1025 create_super = true;
1028 if (le64_to_cpu(mb->position) != cp) {
1029 create_super = true;
1034 log->last_cp_seq = prandom_u32();
1037 * Make sure the super block points to the correct address. The log might
1038 * have data very soon. If the super block doesn't have the correct log
1039 * tail address, recovery can't find the log
1041 r5l_write_super(log, cp);
1043 log->last_cp_seq = le64_to_cpu(mb->seq);
1045 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
1046 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
1047 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
1048 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
1049 log->last_checkpoint = cp;
1053 return r5l_recovery_log(log);
1059 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
1061 struct r5l_log *log;
1063 if (PAGE_SIZE != 4096)
1065 log = kzalloc(sizeof(*log), GFP_KERNEL);
1070 log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
1072 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
1073 sizeof(rdev->mddev->uuid));
1075 mutex_init(&log->io_mutex);
1077 spin_lock_init(&log->io_list_lock);
1078 INIT_LIST_HEAD(&log->running_ios);
1079 INIT_LIST_HEAD(&log->io_end_ios);
1080 INIT_LIST_HEAD(&log->flushing_ios);
1081 INIT_LIST_HEAD(&log->finished_ios);
1082 bio_init(&log->flush_bio);
1084 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
1088 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
1089 log->rdev->mddev, "reclaim");
1090 if (!log->reclaim_thread)
1091 goto reclaim_thread;
1092 init_waitqueue_head(&log->iounit_wait);
1094 INIT_LIST_HEAD(&log->no_space_stripes);
1095 spin_lock_init(&log->no_space_stripes_lock);
1097 if (r5l_load_log(log))
1103 md_unregister_thread(&log->reclaim_thread);
1105 kmem_cache_destroy(log->io_kc);
1111 void r5l_exit_log(struct r5l_log *log)
1113 md_unregister_thread(&log->reclaim_thread);
1114 kmem_cache_destroy(log->io_kc);