/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data are stored on disk in 4k-size units (blocks) regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs when reclaimable space reaches 1/4 of the disk size or 10G,
 * whichever is smaller. This prevents recovery from having to scan a very
 * long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* 10GiB, in sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded down to
                                         * BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs if free space reaches
                                         * this size */

        sector_t last_checkpoint;       /* log tail. where recovery scanning
                                         * starts from */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head. where new data appends */
        u64 seq;                        /* log head sequence */

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet
                                         * written to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for a
                                         * log cache flush */
        struct list_head flushed_ios;   /* io_units which have settled down in
                                         * the log disk */
        struct bio flush_bio;
        struct list_head stripe_end_ios;/* io_units which have been completely
                                         * written to the RAID but have not yet
                                         * been considered for updating the
                                         * superblock */

        struct kmem_cache *io_kc;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space that needs to be
                                         * reclaimed. If it's 0, reclaim only
                                         * the space used by io_units which are
                                         * already in IO_UNIT_STRIPE_END state
                                         * (i.e. reclaim doesn't wait for a
                                         * specific io_unit to switch to
                                         * IO_UNIT_STRIPE_END state) */

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;
};

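/*
 * Note on the layout above: the log device is used as a ring of
 * BLOCK_SECTORS-sized blocks between last_checkpoint (tail, where recovery
 * starts) and log_start (head, where new data is appended);
 * r5l_has_free_space() treats everything between the two, going forward
 * around the ring, as in use.
 */
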
/*
 * An IO range starts at a metadata block and ends at the next metadata block.
 * The io_unit's metadata block tracks the data/parity that follows it. An
 * io_unit is written to the log disk with normal writes; since we always
 * flush the log disk before we start moving data to the RAID disks, there is
 * no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page;         /* store meta block */
        int meta_offset;                /* current offset in meta_page */

        struct bio_list bios;
        atomic_t pending_io;            /* pending bios not yet written to the log */
        struct bio *current_bio;        /* current_bio accepting new data */

        atomic_t pending_stripe;        /* how many stripes are not yet flushed to raid */
        u64 seq;                        /* seq number of the metablock */
        sector_t log_start;             /* where the io_unit starts */
        sector_t log_end;               /* where the io_unit ends */
        struct list_head log_sibling;   /* log->running_ios */
        struct list_head stripe_list;   /* stripes added to the io_unit */

        int state;
        wait_queue_head_t wait_state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bios have started writing to the log,
                                 * no longer accepting new bios */
        IO_UNIT_IO_END = 2,     /* io_unit bios have finished writing to the log */
        IO_UNIT_STRIPE_END = 3, /* stripe data/parity has finished writing to raid */
};

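/*
 * An io_unit only moves forward through these states (enforced by the WARN_ON
 * in __r5l_set_io_unit_state()), and as it progresses it moves along the
 * log's lists in the same order:
 * running_ios -> io_end_ios -> flushing_ios -> flushed_ios -> stripe_end_ios.
 */
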
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}

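/*
 * Example of the ring arithmetic above (illustrative numbers): with
 * device_size == 1000 sectors, r5l_ring_add(log, 996, 8) wraps around to 4,
 * and r5l_ring_distance(log, 996, 4) == 4 + 1000 - 996 == 8.
 */
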
static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
        sector_t used_size;

        used_size = r5l_ring_distance(log, log->last_checkpoint,
                                      log->log_start);

        return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        /* We can't handle memory allocation failure so far */
        gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

        io = kmem_cache_zalloc(log->io_kc, gfp);
        io->log = log;
        io->meta_page = alloc_page(gfp | __GFP_ZERO);

        bio_list_init(&io->bios);
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        io->state = IO_UNIT_RUNNING;
        init_waitqueue_head(&io->wait_state);
        return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
        __free_page(io->meta_page);
        kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
                                  enum r5l_io_unit_state state)
{
        struct r5l_io_unit *io;

        while (!list_empty(from)) {
                io = list_first_entry(from, struct r5l_io_unit, log_sibling);
                /* don't change list order */
                if (io->state >= state)
                        list_move_tail(&io->log_sibling, to);
                else
                        break;
        }
}

/*
 * We don't want too many io_units residing on the stripe_end_ios list; that
 * wastes a lot of memory, so we try to remove some. But we must keep at least
 * two io_units: the superblock must point to a valid metadata block, and
 * keeping the last one means recovery has less to scan.
 */
static void r5l_compress_stripe_end_list(struct r5l_log *log)
{
        struct r5l_io_unit *first, *last, *io;

        first = list_first_entry(&log->stripe_end_ios,
                                 struct r5l_io_unit, log_sibling);
        last = list_last_entry(&log->stripe_end_ios,
                               struct r5l_io_unit, log_sibling);
        if (first == last)
                return;
        list_del(&first->log_sibling);
        list_del(&last->log_sibling);
        while (!list_empty(&log->stripe_end_ios)) {
                io = list_first_entry(&log->stripe_end_ios,
                                      struct r5l_io_unit, log_sibling);
                list_del(&io->log_sibling);
                first->log_end = io->log_end;
                r5l_free_io_unit(log, io);
        }
        list_add_tail(&first->log_sibling, &log->stripe_end_ios);
        list_add_tail(&last->log_sibling, &log->stripe_end_ios);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                    enum r5l_io_unit_state state)
{
        struct r5l_log *log = io->log;

        if (WARN_ON(io->state >= state))
                return;
        io->state = state;
        if (state == IO_UNIT_IO_END)
                r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
                                      IO_UNIT_IO_END);
        if (state == IO_UNIT_STRIPE_END) {
                struct r5l_io_unit *last;
                sector_t reclaimable_space;

                r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios,
                                      IO_UNIT_STRIPE_END);
                last = list_last_entry(&log->stripe_end_ios,
                                       struct r5l_io_unit, log_sibling);
                reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
                                                      last->log_end);
                if (reclaimable_space >= log->max_free_space)
                        r5l_wake_reclaim(log, 0);

                r5l_compress_stripe_end_list(log);
        }
        wake_up(&io->wait_state);
}

static void r5l_set_io_unit_state(struct r5l_io_unit *io,
                                  enum r5l_io_unit_state state)
{
        struct r5l_log *log = io->log;
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, state);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/* XXX: totally ignores I/O errors */
static void r5l_log_endio(struct bio *bio)
{
        struct r5l_io_unit *io = bio->bi_private;
        struct r5l_log *log = io->log;

        bio_put(bio);

        if (!atomic_dec_and_test(&io->pending_io))
                return;

        r5l_set_io_unit_state(io, IO_UNIT_IO_END);
        md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_meta_block *block;
        struct bio *bio;
        u32 crc;

        if (!io)
                return;

        block = page_address(io->meta_page);
        block->meta_size = cpu_to_le32(io->meta_offset);
        crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
        block->checksum = cpu_to_le32(crc);

        log->current_io = NULL;
        r5l_set_io_unit_state(io, IO_UNIT_IO_START);

        while ((bio = bio_list_pop(&io->bios))) {
                /* all IO must start from rdev->data_offset */
                bio->bi_iter.bi_sector += log->rdev->data_offset;
                submit_bio(WRITE, bio);
        }
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        struct r5l_meta_block *block;
        struct bio *bio;

        io = r5l_alloc_io_unit(log);

        block = page_address(io->meta_page);
        block->magic = cpu_to_le32(R5LOG_MAGIC);
        block->version = R5LOG_VERSION;
        block->seq = cpu_to_le64(log->seq);
        block->position = cpu_to_le64(log->log_start);

        io->log_start = log->log_start;
        io->meta_offset = sizeof(struct r5l_meta_block);
        io->seq = log->seq;

        bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
        io->current_bio = bio;
        bio->bi_bdev = log->rdev->bdev;
        bio->bi_iter.bi_sector = log->log_start;
        bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
        bio->bi_end_io = r5l_log_endio;
        bio->bi_private = io;

        bio_list_add(&io->bios, bio);
        atomic_inc(&io->pending_io);

        log->seq++;
        log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
        io->log_end = log->log_start;
        /* current bio hit disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        spin_lock_irq(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->running_ios);
        spin_unlock_irq(&log->io_list_lock);

        return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
        struct r5l_io_unit *io;

        io = log->current_io;
        if (io && io->meta_offset + payload_size > PAGE_SIZE)
                r5l_submit_current_io(log);
        io = log->current_io;
        if (io)
                return 0;

        log->current_io = r5l_new_meta(log);
        return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
                                    sector_t location,
                                    u32 checksum1, u32 checksum2,
                                    bool checksum2_valid)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_payload_data_parity *payload;

        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(type);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
                                    (PAGE_SHIFT - 9));
        payload->location = cpu_to_le64(location);
        payload->checksum[0] = cpu_to_le32(checksum1);
        if (checksum2_valid)
                payload->checksum[1] = cpu_to_le32(checksum2);

        io->meta_offset += sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
        struct r5l_io_unit *io = log->current_io;

alloc_bio:
        if (!io->current_bio) {
                struct bio *bio;

                bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
                bio->bi_bdev = log->rdev->bdev;
                bio->bi_iter.bi_sector = log->log_start;
                bio->bi_end_io = r5l_log_endio;
                bio->bi_private = io;
                bio_list_add(&io->bios, bio);
                atomic_inc(&io->pending_io);
                io->current_bio = bio;
        }
        /* if the current bio is full, start a fresh one and retry */
        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
                io->current_bio = NULL;
                goto alloc_bio;
        }
        log->log_start = r5l_ring_add(log, log->log_start,
                                      BLOCK_SECTORS);
        /* current bio hit disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        io->log_end = log->log_start;
}

static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                           int data_pages, int parity_pages)
{
        int i;
        int meta_size;
        struct r5l_io_unit *io;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
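        /*
         * i.e. each data page needs its own payload descriptor plus one
         * checksum, while all parity pages share a single descriptor that
         * carries one checksum per parity page (see r5l_append_payload_meta).
         */
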
        r5l_get_meta(log, meta_size);
        io = log->current_io;

        for (i = 0; i < sh->disks; i++) {
                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                if (i == sh->pd_idx || i == sh->qd_idx)
                        continue;
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                        raid5_compute_blocknr(sh, i, 0),
                                        sh->dev[i].log_checksum, 0, false);
                r5l_append_payload_page(log, sh->dev[i].page);
        }

        if (sh->qd_idx >= 0) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        sh->dev[sh->qd_idx].log_checksum, true);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
                r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
        } else {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        0, false);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripe);
        sh->log_io = io;
}

/*
 * This runs in raid5d; reclaim could in turn wait for raid5d (when it flushes
 * data from the log to the raid disks), so we must not wait for reclaim here.
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
        int write_disks = 0;
        int data_pages, parity_pages;
        int meta_size;
        int reserve;
        int i;

        if (!log)
                return -EAGAIN;
        /* Don't support stripe batch */
        if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            test_bit(STRIPE_SYNCING, &sh->state)) {
                /* the stripe is written to log, we start writing it to raid */
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        for (i = 0; i < sh->disks; i++) {
                void *addr;

                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                write_disks++;
                /* checksum is already calculated in the last run */
                if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                        continue;
                addr = kmap_atomic(sh->dev[i].page);
                sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                    addr, PAGE_SIZE);
                kunmap_atomic(addr);
        }
        parity_pages = 1 + !!(sh->qd_idx >= 0);
        data_pages = write_disks - parity_pages;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
        /* Doesn't work with very big raid arrays */
        if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
                return -EINVAL;

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        atomic_inc(&sh->count);

        mutex_lock(&log->io_mutex);
        /* meta + data */
        reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
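        /*
         * The reservation is one meta block plus one log block per written
         * data/parity page, converted from 4k blocks to sectors. With
         * illustrative numbers: 4 data pages + 2 parity pages on RAID6 give
         * write_disks == 6, so reserve == 7 << 3 == 56 sectors.
         */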
        if (r5l_has_free_space(log, reserve))
                r5l_log_stripe(log, sh, data_pages, parity_pages);
        else {
                spin_lock(&log->no_space_stripes_lock);
                list_add_tail(&sh->log_list, &log->no_space_stripes);
                spin_unlock(&log->no_space_stripes_lock);

                r5l_wake_reclaim(log, reserve);
        }
        mutex_unlock(&log->io_mutex);

        return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
        if (!log)
                return;
        mutex_lock(&log->io_mutex);
        r5l_submit_current_io(log);
        mutex_unlock(&log->io_mutex);
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
        struct stripe_head *sh;

        spin_lock(&log->no_space_stripes_lock);
        while (!list_empty(&log->no_space_stripes)) {
                sh = list_first_entry(&log->no_space_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&log->no_space_stripes_lock);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
        struct r5l_io_unit *io;

        /* Don't support stripe batch */
        io = sh->log_io;
        if (!io)
                return;
        sh->log_io = NULL;

        if (atomic_dec_and_test(&io->pending_stripe))
                r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
}

static void r5l_log_flush_endio(struct bio *bio)
{
        struct r5l_log *log = container_of(bio, struct r5l_log,
                                           flush_bio);
        unsigned long flags;
        struct r5l_io_unit *io;
        struct stripe_head *sh;

        spin_lock_irqsave(&log->io_list_lock, flags);
        list_for_each_entry(io, &log->flushing_ios, log_sibling) {
                while (!list_empty(&io->stripe_list)) {
                        sh = list_first_entry(&io->stripe_list,
                                              struct stripe_head, log_list);
                        list_del_init(&sh->log_list);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        raid5_release_stripe(sh);
                }
        }
        list_splice_tail_init(&log->flushing_ios, &log->flushed_ios);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * A log consists of io_units, each headed by a meta block. There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * stops recovery, so recovery cannot find any meta block beyond it. If an
 * operation requires a meta block at the log head to be persistent in the
 * log, we must make sure every meta block before it is persistent too. One
 * case is:
 *
 * A stripe's data/parity is in the log and we start writing the stripe to the
 * raid disks; the data/parity must be persistent in the log before we do the
 * write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. In this case, we
 * only write the stripes of an io_unit to the raid disks once it is the first
 * io_unit whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
        bool do_flush;

        if (!log)
                return;

        spin_lock_irq(&log->io_list_lock);
        /* flush bio is running */
        if (!list_empty(&log->flushing_ios)) {
                spin_unlock_irq(&log->io_list_lock);
                return;
        }
        list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
        do_flush = !list_empty(&log->flushing_ios);
        spin_unlock_irq(&log->io_list_lock);

        if (!do_flush)
                return;
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
        /* wake up raid5d to push the io_unit along, then wait for it */
        md_wakeup_thread(log->rdev->mddev->thread);
        wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);

static void r5l_do_reclaim(struct r5l_log *log)
{
        struct r5l_io_unit *io, *last;
        LIST_HEAD(list);
        sector_t free = 0;
        sector_t reclaim_target = xchg(&log->reclaim_target, 0);

        spin_lock_irq(&log->io_list_lock);
        /*
         * move the proper io_units to the reclaim list. We should not change
         * the order: reclaimable and unreclaimable io_units can be mixed in
         * the list, and we mustn't reuse the space of an unreclaimable io_unit.
         */
        while (1) {
                struct list_head *target_list = NULL;

                while (!list_empty(&log->stripe_end_ios)) {
                        io = list_first_entry(&log->stripe_end_ios,
                                              struct r5l_io_unit, log_sibling);
                        list_move_tail(&io->log_sibling, &list);
                        free += r5l_ring_distance(log, io->log_start,
                                                  io->log_end);
                }

                if (free >= reclaim_target ||
                    (list_empty(&log->running_ios) &&
                     list_empty(&log->io_end_ios) &&
                     list_empty(&log->flushing_ios) &&
                     list_empty(&log->flushed_ios)))
                        break;

                /* The waiting below mostly happens when we shut down the raid */
                if (!list_empty(&log->flushed_ios))
                        target_list = &log->flushed_ios;
                else if (!list_empty(&log->flushing_ios))
                        target_list = &log->flushing_ios;
                else if (!list_empty(&log->io_end_ios))
                        target_list = &log->io_end_ios;
                else if (!list_empty(&log->running_ios))
                        target_list = &log->running_ios;

                io = list_first_entry(target_list,
                                      struct r5l_io_unit, log_sibling);
                spin_unlock_irq(&log->io_list_lock);
                /* nobody else can delete the io, we are safe */
                r5l_kick_io_unit(log, io);
                spin_lock_irq(&log->io_list_lock);
        }
        spin_unlock_irq(&log->io_list_lock);

        if (list_empty(&list))
                return;

        /* super always points to the last valid meta */
        last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
        /*
         * write_super will flush the cache of each raid disk. We must write
         * the super here, because the log area might be reused soon and we
         * don't want to confuse recovery.
         */
        r5l_write_super(log, last->log_start);

        mutex_lock(&log->io_mutex);
        log->last_checkpoint = last->log_start;
        log->last_cp_seq = last->seq;
        mutex_unlock(&log->io_mutex);
        r5l_run_no_space_stripes(log);

        while (!list_empty(&list)) {
                io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
                list_del(&io->log_sibling);
                r5l_free_io_unit(log, io);
        }
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct r5conf *conf = mddev->private;
        struct r5l_log *log = conf->log;

        if (!log)
                return;
        r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
        unsigned long target;
        unsigned long new = (unsigned long)space; /* overflow in theory */

        do {
                target = log->reclaim_target;
                if (new < target)
                        return;
        } while (cmpxchg(&log->reclaim_target, target, new) != target);
        md_wakeup_thread(log->reclaim_thread);
}

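/*
 * reclaim_target is consumed with xchg(..., 0) in r5l_do_reclaim(); the
 * cmpxchg loop above never lowers a pending request. Waking with space == 0
 * therefore just asks the reclaim thread to free whatever is already in
 * IO_UNIT_STRIPE_END state.
 */
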
struct r5l_recovery_ctx {
        struct page *meta_page;         /* current meta */
        sector_t meta_total_blocks;     /* total size of current meta and data */
        sector_t pos;                   /* recovery position */
        u64 seq;                        /* recovery position seq */
};

static int r5l_read_meta_block(struct r5l_log *log,
                               struct r5l_recovery_ctx *ctx)
{
        struct page *page = ctx->meta_page;
        struct r5l_meta_block *mb;
        u32 crc, stored_crc;

        if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
                return -EIO;

        mb = page_address(page);
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            le64_to_cpu(mb->seq) != ctx->seq ||
            mb->version != R5LOG_VERSION ||
            le64_to_cpu(mb->position) != ctx->pos)
                return -EINVAL;

        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != crc)
                return -EINVAL;

        if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
                return -EINVAL;

        ctx->meta_total_blocks = BLOCK_SECTORS;

        return 0;
}

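/*
 * Note on the check above: the stored checksum is compared against crc32c
 * over the whole 4k block seeded with the array's uuid_checksum (with the
 * checksum field cleared, matching how the block was written), so a block
 * written for a different array, or one with a stale seq/position, is
 * rejected during recovery.
 */
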
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                         struct r5l_recovery_ctx *ctx,
                                         sector_t stripe_sect,
                                         int *offset, sector_t *log_offset)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct stripe_head *sh;
        struct r5l_payload_data_parity *payload;
        int disk_index;

        sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
        while (1) {
                payload = page_address(ctx->meta_page) + *offset;

                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
                        raid5_compute_sector(conf,
                                             le64_to_cpu(payload->location), 0,
                                             &disk_index, sh);

                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
                        ctx->meta_total_blocks += BLOCK_SECTORS;
                } else {
                        disk_index = sh->pd_idx;
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

                        if (sh->qd_idx >= 0) {
                                disk_index = sh->qd_idx;
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
                                             READ, false);
                                sh->dev[disk_index].log_checksum =
                                        le32_to_cpu(payload->checksum[1]);
                                set_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags);
                        }
                        ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
                }

                *log_offset = r5l_ring_add(log, *log_offset,
                                           le32_to_cpu(payload->size));
                *offset += sizeof(struct r5l_payload_data_parity) +
                        sizeof(__le32) *
                        (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
                        break;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                void *addr;
                u32 checksum;

                if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
                        continue;
                addr = kmap_atomic(sh->dev[disk_index].page);
                checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
                kunmap_atomic(addr);
                if (checksum != sh->dev[disk_index].log_checksum)
                        goto error;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                struct md_rdev *rdev, *rrdev;

                if (!test_and_clear_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags))
                        continue;

                /* in case the device is broken */
                rdev = rcu_dereference(conf->disks[disk_index].rdev);
                if (rdev)
                        sync_page_io(rdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
                rrdev = rcu_dereference(conf->disks[disk_index].replacement);
                if (rrdev)
                        sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
        }
        raid5_release_stripe(sh);
        return 0;

error:
        for (disk_index = 0; disk_index < sh->disks; disk_index++)
                sh->dev[disk_index].flags = 0;
        raid5_release_stripe(sh);
        return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
                                       struct r5l_recovery_ctx *ctx)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct r5l_payload_data_parity *payload;
        struct r5l_meta_block *mb;
        int offset;
        sector_t log_offset;
        sector_t stripe_sector;

        mb = page_address(ctx->meta_page);
        offset = sizeof(struct r5l_meta_block);
        log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

        while (offset < le32_to_cpu(mb->meta_size)) {
                int dd;

                payload = (void *)mb + offset;
                stripe_sector = raid5_compute_sector(conf,
                                                     le64_to_cpu(payload->location), 0, &dd, NULL);
                if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
                                                  &offset, &log_offset))
                        return -EINVAL;
        }
        return 0;
}

/* copy data/parity from the log to the raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
                                   struct r5l_recovery_ctx *ctx)
{
        while (1) {
                if (r5l_read_meta_block(log, ctx))
                        return;
                if (r5l_recovery_flush_one_meta(log, ctx))
                        return;
                ctx->seq++;
                ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
        }
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
                                          u64 seq)
{
        struct page *page;
        struct r5l_meta_block *mb;
        u32 crc;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        mb = page_address(page);
        mb->magic = cpu_to_le32(R5LOG_MAGIC);
        mb->version = R5LOG_VERSION;
        mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
        mb->seq = cpu_to_le64(seq);
        mb->position = cpu_to_le64(pos);
        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        mb->checksum = cpu_to_le32(crc);

        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
        __free_page(page);
        return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
        struct r5l_recovery_ctx ctx;

        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
        ctx.meta_page = alloc_page(GFP_KERNEL);
        if (!ctx.meta_page)
                return -ENOMEM;

        r5l_recovery_flush_log(log, &ctx);
        __free_page(ctx.meta_page);

        /*
         * We did a recovery. Now ctx.pos points to an invalid meta block, and
         * the new log will start there. But we can't simply let the superblock
         * keep pointing to the last valid meta block. The log might look like:
         * | meta 1 | meta 2 | meta 3 |
         * meta 1 is valid, meta 2 is invalid, and meta 3 could still look
         * valid. If the superblock kept pointing to meta 1 and we wrote a new
         * valid meta 2n in meta 2's place, then after another crash recovery
         * would start from meta 1 again; since meta 2n is valid now, recovery
         * would also accept meta 3, which is wrong.
         * The solution is to write the new meta at meta 2's position with
         * seq == meta 1's seq + 10 and let the superblock point to it. That
         * recovery will then not treat meta 3 as valid, because its seq
         * doesn't match.
         */
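        /*
         * Illustrative numbers: if recovery stops with ctx.seq == 101 at the
         * position of the broken meta 2, the empty meta block written there
         * gets seq 111 (ctx.seq + 10) and the live log continues with seq 112
         * (ctx.seq + 11). A later recovery starting from that block expects
         * seq 112 at the next position, so a stale meta 3 with seq 102 is
         * rejected.
         */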
        if (ctx.seq > log->last_cp_seq + 1) {
                int ret;

                ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
                if (ret)
                        return ret;
                log->seq = ctx.seq + 11;
                log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
                r5l_write_super(log, ctx.pos);
        } else {
                log->log_start = ctx.pos;
                log->seq = ctx.seq;
        }
        return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
        struct mddev *mddev = log->rdev->mddev;

        log->rdev->journal_tail = cp;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

static int r5l_load_log(struct r5l_log *log)
{
        struct md_rdev *rdev = log->rdev;
        struct page *page;
        struct r5l_meta_block *mb;
        sector_t cp = log->rdev->journal_tail;
        u32 stored_crc, expected_crc;
        bool create_super = false;
        int ret;

        /* Make sure it's valid */
        if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
                cp = 0;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
                ret = -EIO;
                goto ioerr;
        }
        mb = page_address(page);

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            mb->version != R5LOG_VERSION) {
                create_super = true;
                goto create;
        }
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;
        expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != expected_crc) {
                create_super = true;
                goto create;
        }
        if (le64_to_cpu(mb->position) != cp) {
                create_super = true;
                goto create;
        }
create:
        if (create_super) {
                log->last_cp_seq = prandom_u32();
                cp = 0;
                /*
                 * Make sure the superblock points to the correct address; the
                 * log may get data very soon, and if the superblock doesn't
                 * carry the correct log tail address, recovery can't find the
                 * log.
                 */
                r5l_write_super(log, cp);
        } else
                log->last_cp_seq = le64_to_cpu(mb->seq);

        log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
        log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
        if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
                log->max_free_space = RECLAIM_MAX_FREE_SPACE;
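        /*
         * e.g. a 1TiB journal device would give device_size >> 2 == 256GiB
         * here, so max_free_space is clamped to RECLAIM_MAX_FREE_SPACE
         * (10 * 1024 * 1024 * 2 sectors == 10GiB).
         */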
        log->last_checkpoint = cp;

        __free_page(page);

        return r5l_recovery_log(log);
ioerr:
        __free_page(page);
        return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
        struct r5l_log *log;

        if (PAGE_SIZE != 4096)
                return -EINVAL;
        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return -ENOMEM;
        log->rdev = rdev;

        log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
                                       sizeof(rdev->mddev->uuid));

        mutex_init(&log->io_mutex);

        spin_lock_init(&log->io_list_lock);
        INIT_LIST_HEAD(&log->running_ios);
        INIT_LIST_HEAD(&log->io_end_ios);
        INIT_LIST_HEAD(&log->stripe_end_ios);
        INIT_LIST_HEAD(&log->flushing_ios);
        INIT_LIST_HEAD(&log->flushed_ios);
        bio_init(&log->flush_bio);

        log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
        if (!log->io_kc)
                goto io_kc;

        log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                                 log->rdev->mddev, "reclaim");
        if (!log->reclaim_thread)
                goto reclaim_thread;

        INIT_LIST_HEAD(&log->no_space_stripes);
        spin_lock_init(&log->no_space_stripes_lock);

        if (r5l_load_log(log))
                goto error;

        conf->log = log;
        return 0;
error:
        md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
        kmem_cache_destroy(log->io_kc);
io_kc:
        kfree(log);
        return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
        /*
         * at this point all stripes are finished, so every io_unit is at
         * least in STRIPE_END state
         */
        r5l_wake_reclaim(log, -1L);
        md_unregister_thread(&log->reclaim_thread);
        r5l_do_reclaim(log);
        /*
         * force a superblock update; r5l_do_reclaim might have updated the
         * super. mddev->thread is already stopped.
         */
        md_update_sb(log->rdev->mddev, 1);

        kmem_cache_destroy(log->io_kc);
        kfree(log);
}