[karo-tx-linux.git] / drivers / md / raid5-cache.c
md/r5cache: Check array size in r5l_init_log
1 /*
2  * Copyright (C) 2015 Shaohua Li <shli@fb.com>
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/wait.h>
16 #include <linux/blkdev.h>
17 #include <linux/slab.h>
18 #include <linux/raid/md_p.h>
19 #include <linux/crc32c.h>
20 #include <linux/random.h>
21 #include "md.h"
22 #include "raid5.h"
23
24 /*
25  * metadata/data are stored on disk in 4k units (blocks) regardless of the
26  * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
27  */
28 #define BLOCK_SECTORS (8)
29
30 /*
31  * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G,
32  * whichever is smaller. This prevents recovery from scanning a very long log.
33  */
34 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
35 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
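/*
 * A minimal userspace sketch of how these limits translate into the
 * max_free_space value set up later in r5l_load_log(); the journal device
 * size below is a made-up example, not taken from the driver.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint64_t reclaim_cap = 10ULL * 1024 * 1024 * 2;  /* 10G in sectors */
        uint64_t device_size = 100ULL * 1024 * 1024 * 2;       /* 100G journal dev */
        uint64_t max_free_space = device_size >> 2;            /* 1/4 of the device */

        if (max_free_space > reclaim_cap)
                max_free_space = reclaim_cap;
        assert(max_free_space == reclaim_cap);                  /* capped at 10G */
        return 0;
}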
36
37 /*
38  * We only need 2 bios per I/O unit to make progress, but ensure we
39  * have a few more available to not get too tight.
40  */
41 #define R5L_POOL_SIZE   4
42
43 struct r5l_log {
44         struct md_rdev *rdev;
45
46         u32 uuid_checksum;
47
48         sector_t device_size;           /* log device size, rounded down to
49                                          * BLOCK_SECTORS */
50         sector_t max_free_space;        /* reclaim runs once reclaimable
51                                          * space exceeds this size */
52
53         sector_t last_checkpoint;       /* log tail. where recovery scan
54                                          * starts from */
55         u64 last_cp_seq;                /* log tail sequence */
56
57         sector_t log_start;             /* log head. where new data appends */
58         u64 seq;                        /* log head sequence */
59
60         sector_t next_checkpoint;
61         u64 next_cp_seq;
62
63         struct mutex io_mutex;
64         struct r5l_io_unit *current_io; /* current io_unit accepting new data */
65
66         spinlock_t io_list_lock;
67         struct list_head running_ios;   /* io_units which are still running,
68                                          * and have not yet been completely
69                                          * written to the log */
70         struct list_head io_end_ios;    /* io_units which have been completely
71                                          * written to the log but not yet written
72                                          * to the RAID */
73         struct list_head flushing_ios;  /* io_units which are waiting for log
74                                          * cache flush */
75         struct list_head finished_ios;  /* io_units which settle down in log disk */
76         struct bio flush_bio;
77
78         struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */
79
80         struct kmem_cache *io_kc;
81         mempool_t *io_pool;
82         struct bio_set *bs;
83         mempool_t *meta_pool;
84
85         struct md_thread *reclaim_thread;
86         unsigned long reclaim_target;   /* amount of space that needs to be
87                                          * reclaimed.  if it's 0, reclaim spaces
88                                          * used by io_units which are in
89                                          * IO_UNIT_STRIPE_END state (i.e. reclaim
90                                          * doesn't wait for a specific io_unit
91                                          * to switch to IO_UNIT_STRIPE_END
92                                          * state) */
93         wait_queue_head_t iounit_wait;
94
95         struct list_head no_space_stripes; /* pending stripes, log has no space */
96         spinlock_t no_space_stripes_lock;
97
98         bool need_cache_flush;
99 };
100
101 /*
102  * an IO range starts at a meta data block and ends at the next meta data
103  * block. The io unit's meta data block tracks the data/parity that follows it.
104  * An io unit is written to the log disk with a normal write; as we always
105  * flush the log disk first and only then start moving data to the raid disks,
106  * there is no requirement to write the io unit with FLUSH/FUA.
107  */
108 struct r5l_io_unit {
109         struct r5l_log *log;
110
111         struct page *meta_page; /* store meta block */
112         int meta_offset;        /* current offset in meta_page */
113
114         struct bio *current_bio;/* current_bio accepting new data */
115
116         atomic_t pending_stripe;/* how many stripes not flushed to raid */
117         u64 seq;                /* seq number of the metablock */
118         sector_t log_start;     /* where the io_unit starts */
119         sector_t log_end;       /* where the io_unit ends */
120         struct list_head log_sibling; /* log->running_ios */
121         struct list_head stripe_list; /* stripes added to the io_unit */
122
123         int state;
124         bool need_split_bio;
125 };
126
127 /* r5l_io_unit state */
128 enum r5l_io_unit_state {
129         IO_UNIT_RUNNING = 0,    /* accepting new IO */
130         IO_UNIT_IO_START = 1,   /* io_unit bio started writing to log,
131                                  * no longer accepting new bios */
132         IO_UNIT_IO_END = 2,     /* io_unit bio finished writing to log */
133         IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
134 };
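/*
 * Illustrative on-disk layout of one io_unit (each box is one 4k block; the
 * number of data/parity blocks shown is just an example):
 *
 *   +----------+-----------+-----------+----------+----------+
 *   | meta blk | data page | data page | parity P | parity Q |
 *   +----------+-----------+-----------+----------+----------+
 *   ^ seq N, position X           the next io_unit's meta block follows the
 *                                 last data/parity block, wrapping around at
 *                                 the end of the log device.
 */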
135
136 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
137 {
138         start += inc;
139         if (start >= log->device_size)
140                 start = start - log->device_size;
141         return start;
142 }
143
144 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
145                                   sector_t end)
146 {
147         if (end >= start)
148                 return end - start;
149         else
150                 return end + log->device_size - start;
151 }
152
153 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
154 {
155         sector_t used_size;
156
157         used_size = r5l_ring_distance(log, log->last_checkpoint,
158                                         log->log_start);
159
160         return log->device_size > used_size + size;
161 }
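/*
 * A minimal userspace sketch of the ring arithmetic above, assuming a
 * 64-bit sector type; the 1024-sector log size and the offsets used are
 * made-up examples.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t example_sector_t;

static example_sector_t ring_add(example_sector_t dev_size,
                                 example_sector_t start, example_sector_t inc)
{
        start += inc;
        if (start >= dev_size)
                start -= dev_size;
        return start;
}

static example_sector_t ring_distance(example_sector_t dev_size,
                                      example_sector_t start,
                                      example_sector_t end)
{
        return end >= start ? end - start : end + dev_size - start;
}

int main(void)
{
        /* a head 8 sectors before the end of a 1024-sector log wraps to 0 */
        assert(ring_add(1024, 1016, 8) == 0);
        /* distance from tail 1000 to the wrapped head 16 is 40 sectors */
        assert(ring_distance(1024, 1000, 16) == 40);
        return 0;
}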
162
163 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
164                                     enum r5l_io_unit_state state)
165 {
166         if (WARN_ON(io->state >= state))
167                 return;
168         io->state = state;
169 }
170
171 static void r5l_io_run_stripes(struct r5l_io_unit *io)
172 {
173         struct stripe_head *sh, *next;
174
175         list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
176                 list_del_init(&sh->log_list);
177                 set_bit(STRIPE_HANDLE, &sh->state);
178                 raid5_release_stripe(sh);
179         }
180 }
181
182 static void r5l_log_run_stripes(struct r5l_log *log)
183 {
184         struct r5l_io_unit *io, *next;
185
186         assert_spin_locked(&log->io_list_lock);
187
188         list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
189                 /* don't change list order */
190                 if (io->state < IO_UNIT_IO_END)
191                         break;
192
193                 list_move_tail(&io->log_sibling, &log->finished_ios);
194                 r5l_io_run_stripes(io);
195         }
196 }
197
198 static void r5l_move_to_end_ios(struct r5l_log *log)
199 {
200         struct r5l_io_unit *io, *next;
201
202         assert_spin_locked(&log->io_list_lock);
203
204         list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
205                 /* don't change list order */
206                 if (io->state < IO_UNIT_IO_END)
207                         break;
208                 list_move_tail(&io->log_sibling, &log->io_end_ios);
209         }
210 }
211
212 static void r5l_log_endio(struct bio *bio)
213 {
214         struct r5l_io_unit *io = bio->bi_private;
215         struct r5l_log *log = io->log;
216         unsigned long flags;
217
218         if (bio->bi_error)
219                 md_error(log->rdev->mddev, log->rdev);
220
221         bio_put(bio);
222         mempool_free(io->meta_page, log->meta_pool);
223
224         spin_lock_irqsave(&log->io_list_lock, flags);
225         __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
226         if (log->need_cache_flush)
227                 r5l_move_to_end_ios(log);
228         else
229                 r5l_log_run_stripes(log);
230         spin_unlock_irqrestore(&log->io_list_lock, flags);
231
232         if (log->need_cache_flush)
233                 md_wakeup_thread(log->rdev->mddev->thread);
234 }
235
236 static void r5l_submit_current_io(struct r5l_log *log)
237 {
238         struct r5l_io_unit *io = log->current_io;
239         struct r5l_meta_block *block;
240         unsigned long flags;
241         u32 crc;
242
243         if (!io)
244                 return;
245
246         block = page_address(io->meta_page);
247         block->meta_size = cpu_to_le32(io->meta_offset);
248         crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
249         block->checksum = cpu_to_le32(crc);
250
251         log->current_io = NULL;
252         spin_lock_irqsave(&log->io_list_lock, flags);
253         __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
254         spin_unlock_irqrestore(&log->io_list_lock, flags);
255
256         submit_bio(io->current_bio);
257 }
258
259 static struct bio *r5l_bio_alloc(struct r5l_log *log)
260 {
261         struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
262
263         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
264         bio->bi_bdev = log->rdev->bdev;
265         bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
266
267         return bio;
268 }
269
270 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
271 {
272         log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
273
274         /*
275          * If we filled up the log device start from the beginning again,
276          * which will require a new bio.
277          *
278          * Note: for this to work properly the log size needs to be a multiple
279          * of BLOCK_SECTORS.
280          */
281         if (log->log_start == 0)
282                 io->need_split_bio = true;
283
284         io->log_end = log->log_start;
285 }
286
287 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
288 {
289         struct r5l_io_unit *io;
290         struct r5l_meta_block *block;
291
292         io = mempool_alloc(log->io_pool, GFP_ATOMIC);
293         if (!io)
294                 return NULL;
295         memset(io, 0, sizeof(*io));
296
297         io->log = log;
298         INIT_LIST_HEAD(&io->log_sibling);
299         INIT_LIST_HEAD(&io->stripe_list);
300         io->state = IO_UNIT_RUNNING;
301
302         io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
303         block = page_address(io->meta_page);
304         clear_page(block);
305         block->magic = cpu_to_le32(R5LOG_MAGIC);
306         block->version = R5LOG_VERSION;
307         block->seq = cpu_to_le64(log->seq);
308         block->position = cpu_to_le64(log->log_start);
309
310         io->log_start = log->log_start;
311         io->meta_offset = sizeof(struct r5l_meta_block);
312         io->seq = log->seq++;
313
314         io->current_bio = r5l_bio_alloc(log);
315         io->current_bio->bi_end_io = r5l_log_endio;
316         io->current_bio->bi_private = io;
317         bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
318
319         r5_reserve_log_entry(log, io);
320
321         spin_lock_irq(&log->io_list_lock);
322         list_add_tail(&io->log_sibling, &log->running_ios);
323         spin_unlock_irq(&log->io_list_lock);
324
325         return io;
326 }
327
328 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
329 {
330         if (log->current_io &&
331             log->current_io->meta_offset + payload_size > PAGE_SIZE)
332                 r5l_submit_current_io(log);
333
334         if (!log->current_io) {
335                 log->current_io = r5l_new_meta(log);
336                 if (!log->current_io)
337                         return -ENOMEM;
338         }
339
340         return 0;
341 }
342
343 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
344                                     sector_t location,
345                                     u32 checksum1, u32 checksum2,
346                                     bool checksum2_valid)
347 {
348         struct r5l_io_unit *io = log->current_io;
349         struct r5l_payload_data_parity *payload;
350
351         payload = page_address(io->meta_page) + io->meta_offset;
352         payload->header.type = cpu_to_le16(type);
353         payload->header.flags = cpu_to_le16(0);
354         payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
355                                     (PAGE_SHIFT - 9));
356         payload->location = cpu_to_le64(location);
357         payload->checksum[0] = cpu_to_le32(checksum1);
358         if (checksum2_valid)
359                 payload->checksum[1] = cpu_to_le32(checksum2);
360
361         io->meta_offset += sizeof(struct r5l_payload_data_parity) +
362                 sizeof(__le32) * (1 + !!checksum2_valid);
363 }
364
365 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
366 {
367         struct r5l_io_unit *io = log->current_io;
368
369         if (io->need_split_bio) {
370                 struct bio *prev = io->current_bio;
371
372                 io->current_bio = r5l_bio_alloc(log);
373                 bio_chain(io->current_bio, prev);
374
375                 submit_bio(prev);
376         }
377
378         if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
379                 BUG();
380
381         r5_reserve_log_entry(log, io);
382 }
383
384 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
385                            int data_pages, int parity_pages)
386 {
387         int i;
388         int meta_size;
389         int ret;
390         struct r5l_io_unit *io;
391
392         meta_size =
393                 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
394                  * data_pages) +
395                 sizeof(struct r5l_payload_data_parity) +
396                 sizeof(__le32) * parity_pages;
397
398         ret = r5l_get_meta(log, meta_size);
399         if (ret)
400                 return ret;
401
402         io = log->current_io;
403
404         for (i = 0; i < sh->disks; i++) {
405                 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
406                         continue;
407                 if (i == sh->pd_idx || i == sh->qd_idx)
408                         continue;
409                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
410                                         raid5_compute_blocknr(sh, i, 0),
411                                         sh->dev[i].log_checksum, 0, false);
412                 r5l_append_payload_page(log, sh->dev[i].page);
413         }
414
415         if (sh->qd_idx >= 0) {
416                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
417                                         sh->sector, sh->dev[sh->pd_idx].log_checksum,
418                                         sh->dev[sh->qd_idx].log_checksum, true);
419                 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
420                 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
421         } else {
422                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
423                                         sh->sector, sh->dev[sh->pd_idx].log_checksum,
424                                         0, false);
425                 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
426         }
427
428         list_add_tail(&sh->log_list, &io->stripe_list);
429         atomic_inc(&io->pending_stripe);
430         sh->log_io = io;
431
432         return 0;
433 }
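/*
 * Worked meta_size example for the computation above (the struct sizes are
 * assumptions for illustration: a 16-byte payload descriptor plus 4 bytes
 * per checksum): a RAID6 stripe logging 4 data pages plus P and Q needs
 *
 *   (16 + 4) * 4 bytes for the data payloads  =  80
 *   16 + 4 * 2 bytes for the parity payload   =  24
 *                                       total = 104 bytes,
 *
 * comfortably within the PAGE_SIZE budget enforced by r5l_get_meta().
 */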
434
435 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
436 /*
437  * This runs in raid5d, and reclaim could in turn wait for raid5d (when it
438  * flushes data from the log to the raid disks), so we shouldn't wait for reclaim here
439  */
440 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
441 {
442         int write_disks = 0;
443         int data_pages, parity_pages;
444         int reserve;
445         int i;
446         int ret = 0;
447
448         if (!log)
449                 return -EAGAIN;
450         /* Don't support stripe batch */
451         if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
452             test_bit(STRIPE_SYNCING, &sh->state)) {
453                 /* the stripe is written to log, we start writing it to raid */
454                 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
455                 return -EAGAIN;
456         }
457
458         for (i = 0; i < sh->disks; i++) {
459                 void *addr;
460
461                 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
462                         continue;
463                 write_disks++;
464                 /* checksum is already calculated in last run */
465                 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
466                         continue;
467                 addr = kmap_atomic(sh->dev[i].page);
468                 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
469                                                     addr, PAGE_SIZE);
470                 kunmap_atomic(addr);
471         }
472         parity_pages = 1 + !!(sh->qd_idx >= 0);
473         data_pages = write_disks - parity_pages;
474
475         set_bit(STRIPE_LOG_TRAPPED, &sh->state);
476         /*
477          * The stripe must enter the state machine again to finish the write, so
478          * don't delay.
479          */
480         clear_bit(STRIPE_DELAYED, &sh->state);
481         atomic_inc(&sh->count);
482
483         mutex_lock(&log->io_mutex);
484         /* meta + data */
485         reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
486         if (!r5l_has_free_space(log, reserve)) {
487                 spin_lock(&log->no_space_stripes_lock);
488                 list_add_tail(&sh->log_list, &log->no_space_stripes);
489                 spin_unlock(&log->no_space_stripes_lock);
490
491                 r5l_wake_reclaim(log, reserve);
492         } else {
493                 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
494                 if (ret) {
495                         spin_lock_irq(&log->io_list_lock);
496                         list_add_tail(&sh->log_list, &log->no_mem_stripes);
497                         spin_unlock_irq(&log->io_list_lock);
498                 }
499         }
500
501         mutex_unlock(&log->io_mutex);
502         return 0;
503 }
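/*
 * A small sketch of the space reservation above, assuming PAGE_SHIFT == 12
 * (4k pages, i.e. 8 sectors per page); the disk counts are a made-up example.
 */
#include <assert.h>

int main(void)
{
        int page_shift = 12;                    /* 4k pages */
        int write_disks = 6;                    /* e.g. 4 data pages + P + Q */
        int reserve = (1 + write_disks) << (page_shift - 9);

        assert(reserve == 56);                  /* 7 blocks of 8 sectors */
        return 0;
}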
504
505 void r5l_write_stripe_run(struct r5l_log *log)
506 {
507         if (!log)
508                 return;
509         mutex_lock(&log->io_mutex);
510         r5l_submit_current_io(log);
511         mutex_unlock(&log->io_mutex);
512 }
513
514 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
515 {
516         if (!log)
517                 return -ENODEV;
518         /*
519          * we flush the log disk cache first, then write stripe data to the raid
520          * disks. So if the bio has finished, the log disk cache is already
521          * flushed. Recovery guarantees we can recover the bio from the log disk,
522          * so we don't need to flush again.
523          */
524         if (bio->bi_iter.bi_size == 0) {
525                 bio_endio(bio);
526                 return 0;
527         }
528         bio->bi_opf &= ~REQ_PREFLUSH;
529         return -EAGAIN;
530 }
531
532 /* This will run after log space is reclaimed */
533 static void r5l_run_no_space_stripes(struct r5l_log *log)
534 {
535         struct stripe_head *sh;
536
537         spin_lock(&log->no_space_stripes_lock);
538         while (!list_empty(&log->no_space_stripes)) {
539                 sh = list_first_entry(&log->no_space_stripes,
540                                       struct stripe_head, log_list);
541                 list_del_init(&sh->log_list);
542                 set_bit(STRIPE_HANDLE, &sh->state);
543                 raid5_release_stripe(sh);
544         }
545         spin_unlock(&log->no_space_stripes_lock);
546 }
547
548 static sector_t r5l_reclaimable_space(struct r5l_log *log)
549 {
550         return r5l_ring_distance(log, log->last_checkpoint,
551                                  log->next_checkpoint);
552 }
553
554 static void r5l_run_no_mem_stripe(struct r5l_log *log)
555 {
556         struct stripe_head *sh;
557
558         assert_spin_locked(&log->io_list_lock);
559
560         if (!list_empty(&log->no_mem_stripes)) {
561                 sh = list_first_entry(&log->no_mem_stripes,
562                                       struct stripe_head, log_list);
563                 list_del_init(&sh->log_list);
564                 set_bit(STRIPE_HANDLE, &sh->state);
565                 raid5_release_stripe(sh);
566         }
567 }
568
569 static bool r5l_complete_finished_ios(struct r5l_log *log)
570 {
571         struct r5l_io_unit *io, *next;
572         bool found = false;
573
574         assert_spin_locked(&log->io_list_lock);
575
576         list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
577                 /* don't change list order */
578                 if (io->state < IO_UNIT_STRIPE_END)
579                         break;
580
581                 log->next_checkpoint = io->log_start;
582                 log->next_cp_seq = io->seq;
583
584                 list_del(&io->log_sibling);
585                 mempool_free(io, log->io_pool);
586                 r5l_run_no_mem_stripe(log);
587
588                 found = true;
589         }
590
591         return found;
592 }
593
594 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
595 {
596         struct r5l_log *log = io->log;
597         unsigned long flags;
598
599         spin_lock_irqsave(&log->io_list_lock, flags);
600         __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
601
602         if (!r5l_complete_finished_ios(log)) {
603                 spin_unlock_irqrestore(&log->io_list_lock, flags);
604                 return;
605         }
606
607         if (r5l_reclaimable_space(log) > log->max_free_space)
608                 r5l_wake_reclaim(log, 0);
609
610         spin_unlock_irqrestore(&log->io_list_lock, flags);
611         wake_up(&log->iounit_wait);
612 }
613
614 void r5l_stripe_write_finished(struct stripe_head *sh)
615 {
616         struct r5l_io_unit *io;
617
618         io = sh->log_io;
619         sh->log_io = NULL;
620
621         if (io && atomic_dec_and_test(&io->pending_stripe))
622                 __r5l_stripe_write_finished(io);
623 }
624
625 static void r5l_log_flush_endio(struct bio *bio)
626 {
627         struct r5l_log *log = container_of(bio, struct r5l_log,
628                 flush_bio);
629         unsigned long flags;
630         struct r5l_io_unit *io;
631
632         if (bio->bi_error)
633                 md_error(log->rdev->mddev, log->rdev);
634
635         spin_lock_irqsave(&log->io_list_lock, flags);
636         list_for_each_entry(io, &log->flushing_ios, log_sibling)
637                 r5l_io_run_stripes(io);
638         list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
639         spin_unlock_irqrestore(&log->io_list_lock, flags);
640 }
641
642 /*
643  * Start dispatching IO to the raid disks.
644  * The log is a sequence of io_units (meta blocks). There is one situation we
645  * want to avoid: a broken meta in the middle of the log stops recovery from
646  * finding the metas beyond it. So if an operation requires a meta to be
647  * persistent in the log, every meta before it must be persistent too. A case is:
648  *
649  * stripe data/parity is in the log and we start writing the stripe to the raid
650  * disks; the data/parity must be persistent in the log before we do that write.
651  *
652  * The solution is to strictly maintain io_unit list order. In this case we
653  * write stripes of io_units to the raid disks in order, stopping at the first
654  * io_unit whose data/parity is not yet in the log.
655  */
656 void r5l_flush_stripe_to_raid(struct r5l_log *log)
657 {
658         bool do_flush;
659
660         if (!log || !log->need_cache_flush)
661                 return;
662
663         spin_lock_irq(&log->io_list_lock);
664         /* flush bio is running */
665         if (!list_empty(&log->flushing_ios)) {
666                 spin_unlock_irq(&log->io_list_lock);
667                 return;
668         }
669         list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
670         do_flush = !list_empty(&log->flushing_ios);
671         spin_unlock_irq(&log->io_list_lock);
672
673         if (!do_flush)
674                 return;
675         bio_reset(&log->flush_bio);
676         log->flush_bio.bi_bdev = log->rdev->bdev;
677         log->flush_bio.bi_end_io = r5l_log_flush_endio;
678         bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
679         submit_bio(&log->flush_bio);
680 }
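/*
 * Illustrative ordering constraint for the comment above (the unit numbers
 * are made up): if io_units 1..4 were appended in that order and only 1, 2
 * and 4 have reached IO_UNIT_IO_END, then only the stripes of 1 and 2 may be
 * moved towards the raid disks; 4 must wait until 3's data/parity is in the
 * log, otherwise a crash could leave a hole that recovery cannot scan past.
 */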
681
682 static void r5l_write_super(struct r5l_log *log, sector_t cp);
683 static void r5l_write_super_and_discard_space(struct r5l_log *log,
684         sector_t end)
685 {
686         struct block_device *bdev = log->rdev->bdev;
687         struct mddev *mddev;
688
689         r5l_write_super(log, end);
690
691         if (!blk_queue_discard(bdev_get_queue(bdev)))
692                 return;
693
694         mddev = log->rdev->mddev;
695         /*
696          * Discard could zero data, so before discarding we must make sure the
697          * superblock is updated to the new log tail. Updating the superblock
698          * (calling md_update_sb() directly or depending on the md thread) must
699          * hold the reconfig mutex. On the other hand, raid5_quiesce is called
700          * with reconfig_mutex held. The first step of raid5_quiesce() is waiting
701          * for all IO to finish, hence waiting for the reclaim thread, while the
702          * reclaim thread is calling this function and waiting for the reconfig
703          * mutex. So there is a deadlock. We work around this with a trylock.
704          * FIXME: we could miss a discard if we can't take the reconfig mutex
705          */
706         set_mask_bits(&mddev->flags, 0,
707                 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
708         if (!mddev_trylock(mddev))
709                 return;
710         md_update_sb(mddev, 1);
711         mddev_unlock(mddev);
712
713         /* discard IO error really doesn't matter, ignore it */
714         if (log->last_checkpoint < end) {
715                 blkdev_issue_discard(bdev,
716                                 log->last_checkpoint + log->rdev->data_offset,
717                                 end - log->last_checkpoint, GFP_NOIO, 0);
718         } else {
719                 blkdev_issue_discard(bdev,
720                                 log->last_checkpoint + log->rdev->data_offset,
721                                 log->device_size - log->last_checkpoint,
722                                 GFP_NOIO, 0);
723                 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
724                                 GFP_NOIO, 0);
725         }
726 }
727
728
729 static void r5l_do_reclaim(struct r5l_log *log)
730 {
731         sector_t reclaim_target = xchg(&log->reclaim_target, 0);
732         sector_t reclaimable;
733         sector_t next_checkpoint;
734         u64 next_cp_seq;
735
736         spin_lock_irq(&log->io_list_lock);
737         /*
738          * move the proper io_units to the reclaim list. We should not change the
739          * order: reclaimable and unreclaimable io_units can be mixed in the list,
740          * and we shouldn't reuse the space of an unreclaimable io_unit.
741          */
742         while (1) {
743                 reclaimable = r5l_reclaimable_space(log);
744                 if (reclaimable >= reclaim_target ||
745                     (list_empty(&log->running_ios) &&
746                      list_empty(&log->io_end_ios) &&
747                      list_empty(&log->flushing_ios) &&
748                      list_empty(&log->finished_ios)))
749                         break;
750
751                 md_wakeup_thread(log->rdev->mddev->thread);
752                 wait_event_lock_irq(log->iounit_wait,
753                                     r5l_reclaimable_space(log) > reclaimable,
754                                     log->io_list_lock);
755         }
756
757         next_checkpoint = log->next_checkpoint;
758         next_cp_seq = log->next_cp_seq;
759         spin_unlock_irq(&log->io_list_lock);
760
761         BUG_ON(reclaimable < 0);
762         if (reclaimable == 0)
763                 return;
764
765         /*
766          * write_super will flush the cache of each raid disk. We must write the
767          * super here, because the log area might be reused soon and we don't
768          * want to confuse recovery.
769          */
770         r5l_write_super_and_discard_space(log, next_checkpoint);
771
772         mutex_lock(&log->io_mutex);
773         log->last_checkpoint = next_checkpoint;
774         log->last_cp_seq = next_cp_seq;
775         mutex_unlock(&log->io_mutex);
776
777         r5l_run_no_space_stripes(log);
778 }
779
780 static void r5l_reclaim_thread(struct md_thread *thread)
781 {
782         struct mddev *mddev = thread->mddev;
783         struct r5conf *conf = mddev->private;
784         struct r5l_log *log = conf->log;
785
786         if (!log)
787                 return;
788         r5l_do_reclaim(log);
789 }
790
791 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
792 {
793         unsigned long target;
794         unsigned long new = (unsigned long)space; /* overflow in theory */
795
796         do {
797                 target = log->reclaim_target;
798                 if (new < target)
799                         return;
800         } while (cmpxchg(&log->reclaim_target, target, new) != target);
801         md_wakeup_thread(log->reclaim_thread);
802 }
803
804 void r5l_quiesce(struct r5l_log *log, int state)
805 {
806         struct mddev *mddev;
807         if (!log || state == 2)
808                 return;
809         if (state == 0) {
810                 /*
811                  * This is a special case for hotadd. In suspend, the array has
812                  * no journal. In resume, the journal is initialized along with
813                  * the reclaim thread.
814                  */
815                 if (log->reclaim_thread)
816                         return;
817                 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
818                                         log->rdev->mddev, "reclaim");
819         } else if (state == 1) {
820                 /* make sure r5l_write_super_and_discard_space exits */
821                 mddev = log->rdev->mddev;
822                 wake_up(&mddev->sb_wait);
823                 r5l_wake_reclaim(log, -1L);
824                 md_unregister_thread(&log->reclaim_thread);
825                 r5l_do_reclaim(log);
826         }
827 }
828
829 bool r5l_log_disk_error(struct r5conf *conf)
830 {
831         struct r5l_log *log;
832         bool ret;
833         /* don't allow write if journal disk is missing */
834         rcu_read_lock();
835         log = rcu_dereference(conf->log);
836
837         if (!log)
838                 ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
839         else
840                 ret = test_bit(Faulty, &log->rdev->flags);
841         rcu_read_unlock();
842         return ret;
843 }
844
845 struct r5l_recovery_ctx {
846         struct page *meta_page;         /* current meta */
847         sector_t meta_total_blocks;     /* total size of current meta and data */
848         sector_t pos;                   /* recovery position */
849         u64 seq;                        /* recovery position seq */
850 };
851
852 static int r5l_read_meta_block(struct r5l_log *log,
853                                struct r5l_recovery_ctx *ctx)
854 {
855         struct page *page = ctx->meta_page;
856         struct r5l_meta_block *mb;
857         u32 crc, stored_crc;
858
859         if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
860                           false))
861                 return -EIO;
862
863         mb = page_address(page);
864         stored_crc = le32_to_cpu(mb->checksum);
865         mb->checksum = 0;
866
867         if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
868             le64_to_cpu(mb->seq) != ctx->seq ||
869             mb->version != R5LOG_VERSION ||
870             le64_to_cpu(mb->position) != ctx->pos)
871                 return -EINVAL;
872
873         crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
874         if (stored_crc != crc)
875                 return -EINVAL;
876
877         if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
878                 return -EINVAL;
879
880         ctx->meta_total_blocks = BLOCK_SECTORS;
881
882         return 0;
883 }
884
885 static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
886                                          struct r5l_recovery_ctx *ctx,
887                                          sector_t stripe_sect,
888                                          int *offset)
889 {
890         struct r5conf *conf = log->rdev->mddev->private;
891         struct stripe_head *sh;
892         struct r5l_payload_data_parity *payload;
893         int disk_index;
894
895         sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
896         while (1) {
897                 sector_t log_offset = r5l_ring_add(log, ctx->pos,
898                                 ctx->meta_total_blocks);
899                 payload = page_address(ctx->meta_page) + *offset;
900
901                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
902                         raid5_compute_sector(conf,
903                                              le64_to_cpu(payload->location), 0,
904                                              &disk_index, sh);
905
906                         sync_page_io(log->rdev, log_offset, PAGE_SIZE,
907                                      sh->dev[disk_index].page, REQ_OP_READ, 0,
908                                      false);
909                         sh->dev[disk_index].log_checksum =
910                                 le32_to_cpu(payload->checksum[0]);
911                         set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
912                 } else {
913                         disk_index = sh->pd_idx;
914                         sync_page_io(log->rdev, log_offset, PAGE_SIZE,
915                                      sh->dev[disk_index].page, REQ_OP_READ, 0,
916                                      false);
917                         sh->dev[disk_index].log_checksum =
918                                 le32_to_cpu(payload->checksum[0]);
919                         set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
920
921                         if (sh->qd_idx >= 0) {
922                                 disk_index = sh->qd_idx;
923                                 sync_page_io(log->rdev,
924                                              r5l_ring_add(log, log_offset, BLOCK_SECTORS),
925                                              PAGE_SIZE, sh->dev[disk_index].page,
926                                              REQ_OP_READ, 0, false);
927                                 sh->dev[disk_index].log_checksum =
928                                         le32_to_cpu(payload->checksum[1]);
929                                 set_bit(R5_Wantwrite,
930                                         &sh->dev[disk_index].flags);
931                         }
932                 }
933
934                 ctx->meta_total_blocks += le32_to_cpu(payload->size);
935                 *offset += sizeof(struct r5l_payload_data_parity) +
936                         sizeof(__le32) *
937                         (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
938                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
939                         break;
940         }
941
942         for (disk_index = 0; disk_index < sh->disks; disk_index++) {
943                 void *addr;
944                 u32 checksum;
945
946                 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
947                         continue;
948                 addr = kmap_atomic(sh->dev[disk_index].page);
949                 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
950                 kunmap_atomic(addr);
951                 if (checksum != sh->dev[disk_index].log_checksum)
952                         goto error;
953         }
954
955         for (disk_index = 0; disk_index < sh->disks; disk_index++) {
956                 struct md_rdev *rdev, *rrdev;
957
958                 if (!test_and_clear_bit(R5_Wantwrite,
959                                         &sh->dev[disk_index].flags))
960                         continue;
961
962                 /* in case device is broken */
963                 rcu_read_lock();
964                 rdev = rcu_dereference(conf->disks[disk_index].rdev);
965                 if (rdev) {
966                         atomic_inc(&rdev->nr_pending);
967                         rcu_read_unlock();
968                         sync_page_io(rdev, stripe_sect, PAGE_SIZE,
969                                      sh->dev[disk_index].page, REQ_OP_WRITE, 0,
970                                      false);
971                         rdev_dec_pending(rdev, rdev->mddev);
972                         rcu_read_lock();
973                 }
974                 rrdev = rcu_dereference(conf->disks[disk_index].replacement);
975                 if (rrdev) {
976                         atomic_inc(&rrdev->nr_pending);
977                         rcu_read_unlock();
978                         sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
979                                      sh->dev[disk_index].page, REQ_OP_WRITE, 0,
980                                      false);
981                         rdev_dec_pending(rrdev, rrdev->mddev);
982                         rcu_read_lock();
983                 }
984                 rcu_read_unlock();
985         }
986         raid5_release_stripe(sh);
987         return 0;
988
989 error:
990         for (disk_index = 0; disk_index < sh->disks; disk_index++)
991                 sh->dev[disk_index].flags = 0;
992         raid5_release_stripe(sh);
993         return -EINVAL;
994 }
995
996 static int r5l_recovery_flush_one_meta(struct r5l_log *log,
997                                        struct r5l_recovery_ctx *ctx)
998 {
999         struct r5conf *conf = log->rdev->mddev->private;
1000         struct r5l_payload_data_parity *payload;
1001         struct r5l_meta_block *mb;
1002         int offset;
1003         sector_t stripe_sector;
1004
1005         mb = page_address(ctx->meta_page);
1006         offset = sizeof(struct r5l_meta_block);
1007
1008         while (offset < le32_to_cpu(mb->meta_size)) {
1009                 int dd;
1010
1011                 payload = (void *)mb + offset;
1012                 stripe_sector = raid5_compute_sector(conf,
1013                                                      le64_to_cpu(payload->location), 0, &dd, NULL);
1014                 if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
1015                                                   &offset))
1016                         return -EINVAL;
1017         }
1018         return 0;
1019 }
1020
1021 /* copy data/parity from log to raid disks */
1022 static void r5l_recovery_flush_log(struct r5l_log *log,
1023                                    struct r5l_recovery_ctx *ctx)
1024 {
1025         while (1) {
1026                 if (r5l_read_meta_block(log, ctx))
1027                         return;
1028                 if (r5l_recovery_flush_one_meta(log, ctx))
1029                         return;
1030                 ctx->seq++;
1031                 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
1032         }
1033 }
1034
1035 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1036                                           u64 seq)
1037 {
1038         struct page *page;
1039         struct r5l_meta_block *mb;
1040         u32 crc;
1041
1042         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1043         if (!page)
1044                 return -ENOMEM;
1045         mb = page_address(page);
1046         mb->magic = cpu_to_le32(R5LOG_MAGIC);
1047         mb->version = R5LOG_VERSION;
1048         mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1049         mb->seq = cpu_to_le64(seq);
1050         mb->position = cpu_to_le64(pos);
1051         crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1052         mb->checksum = cpu_to_le32(crc);
1053
1054         if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
1055                           WRITE_FUA, false)) {
1056                 __free_page(page);
1057                 return -EIO;
1058         }
1059         __free_page(page);
1060         return 0;
1061 }
1062
1063 static int r5l_recovery_log(struct r5l_log *log)
1064 {
1065         struct r5l_recovery_ctx ctx;
1066
1067         ctx.pos = log->last_checkpoint;
1068         ctx.seq = log->last_cp_seq;
1069         ctx.meta_page = alloc_page(GFP_KERNEL);
1070         if (!ctx.meta_page)
1071                 return -ENOMEM;
1072
1073         r5l_recovery_flush_log(log, &ctx);
1074         __free_page(ctx.meta_page);
1075
1076         /*
1077          * we did a recovery. Now ctx.pos points to an invalid meta block. The new
1078          * log will start here, but we can't let the superblock point to the last
1079          * valid meta block. The log might look like:
1080          * | meta 1| meta 2| meta 3|
1081          * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
1082          * superblock pointed to meta 1 and we wrote a new valid meta 2, then after
1083          * another crash the new recovery would start from meta 1; since meta 2 is
1084          * valid now, recovery would think meta 3 is valid too, which is wrong.
1085          * The solution is to create the new meta at meta 2's position with seq ==
1086          * meta 1's seq + 10 and point the superblock at it. That recovery will not
1087          * treat meta 3 as valid, because its seq doesn't match.
1088          */
1089         if (ctx.seq > log->last_cp_seq) {
1090                 int ret;
1091
1092                 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
1093                 if (ret)
1094                         return ret;
1095                 log->seq = ctx.seq + 11;
1096                 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
1097                 r5l_write_super(log, ctx.pos);
1098                 log->last_checkpoint = ctx.pos;
1099                 log->next_checkpoint = ctx.pos;
1100         } else {
1101                 log->log_start = ctx.pos;
1102                 log->seq = ctx.seq;
1103         }
1104         return 0;
1105 }
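/*
 * A toy illustration of the seq gap described above (the numbers are made
 * up): recovery only accepts a meta block whose seq matches the value it
 * expects next, so a stale block left over from before the crash can never
 * match the jumped-ahead sequence.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t meta1_seq = 100;               /* last valid meta before the crash */
        uint64_t stale_seq = meta1_seq + 2;     /* old "meta 3" still on disk */
        uint64_t new_seq = meta1_seq + 10;      /* empty meta written by recovery */

        /* after the new meta, the next block must carry new_seq + 1 */
        assert(stale_seq != new_seq + 1);
        return 0;
}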
1106
1107 static void r5l_write_super(struct r5l_log *log, sector_t cp)
1108 {
1109         struct mddev *mddev = log->rdev->mddev;
1110
1111         log->rdev->journal_tail = cp;
1112         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1113 }
1114
1115 static int r5l_load_log(struct r5l_log *log)
1116 {
1117         struct md_rdev *rdev = log->rdev;
1118         struct page *page;
1119         struct r5l_meta_block *mb;
1120         sector_t cp = log->rdev->journal_tail;
1121         u32 stored_crc, expected_crc;
1122         bool create_super = false;
1123         int ret;
1124
1125         /* Make sure it's valid */
1126         if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
1127                 cp = 0;
1128         page = alloc_page(GFP_KERNEL);
1129         if (!page)
1130                 return -ENOMEM;
1131
1132         if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
1133                 ret = -EIO;
1134                 goto ioerr;
1135         }
1136         mb = page_address(page);
1137
1138         if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1139             mb->version != R5LOG_VERSION) {
1140                 create_super = true;
1141                 goto create;
1142         }
1143         stored_crc = le32_to_cpu(mb->checksum);
1144         mb->checksum = 0;
1145         expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1146         if (stored_crc != expected_crc) {
1147                 create_super = true;
1148                 goto create;
1149         }
1150         if (le64_to_cpu(mb->position) != cp) {
1151                 create_super = true;
1152                 goto create;
1153         }
1154 create:
1155         if (create_super) {
1156                 log->last_cp_seq = prandom_u32();
1157                 cp = 0;
1158                 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
1159                 /*
1160                  * Make sure the super points to the correct address. The log might
1161                  * get data very soon. If the super doesn't have the correct log
1162                  * tail address, recovery can't find the log.
1163                  */
1164                 r5l_write_super(log, cp);
1165         } else
1166                 log->last_cp_seq = le64_to_cpu(mb->seq);
1167
1168         log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
1169         log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
1170         if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
1171                 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
1172         log->last_checkpoint = cp;
1173         log->next_checkpoint = cp;
1174
1175         __free_page(page);
1176
1177         return r5l_recovery_log(log);
1178 ioerr:
1179         __free_page(page);
1180         return ret;
1181 }
1182
1183 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
1184 {
1185         struct request_queue *q = bdev_get_queue(rdev->bdev);
1186         struct r5l_log *log;
1187
1188         if (PAGE_SIZE != 4096)
1189                 return -EINVAL;
1190
1191         /*
1192          * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
1193          * raid_disks r5l_payload_data_parity structures.
1194          *
1195          * The write journal and cache do not work for very big arrays
1196          * (raid_disks > 203).
1197          */
1198         if (sizeof(struct r5l_meta_block) +
1199             ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
1200              conf->raid_disks) > PAGE_SIZE) {
1201                 pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
1202                        mdname(conf->mddev), conf->raid_disks);
1203                 return -EINVAL;
1204         }
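/*
 * Where the 203-disk limit comes from (a back-of-the-envelope check; the
 * struct sizes are assumptions consistent with that limit:
 * sizeof(struct r5l_meta_block) == 32, sizeof(struct r5l_payload_data_parity)
 * == 16, plus one 4-byte checksum per payload):
 *
 *   32 + (16 + 4) * raid_disks <= 4096
 *        20 * raid_disks       <= 4064
 *             raid_disks       <= 203
 */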
1205
1206         log = kzalloc(sizeof(*log), GFP_KERNEL);
1207         if (!log)
1208                 return -ENOMEM;
1209         log->rdev = rdev;
1210
1211         log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
1212
1213         log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
1214                                        sizeof(rdev->mddev->uuid));
1215
1216         mutex_init(&log->io_mutex);
1217
1218         spin_lock_init(&log->io_list_lock);
1219         INIT_LIST_HEAD(&log->running_ios);
1220         INIT_LIST_HEAD(&log->io_end_ios);
1221         INIT_LIST_HEAD(&log->flushing_ios);
1222         INIT_LIST_HEAD(&log->finished_ios);
1223         bio_init(&log->flush_bio);
1224
1225         log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
1226         if (!log->io_kc)
1227                 goto io_kc;
1228
1229         log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
1230         if (!log->io_pool)
1231                 goto io_pool;
1232
1233         log->bs = bioset_create(R5L_POOL_SIZE, 0);
1234         if (!log->bs)
1235                 goto io_bs;
1236
1237         log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
1238         if (!log->meta_pool)
1239                 goto out_mempool;
1240
1241         log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
1242                                                  log->rdev->mddev, "reclaim");
1243         if (!log->reclaim_thread)
1244                 goto reclaim_thread;
1245         init_waitqueue_head(&log->iounit_wait);
1246
1247         INIT_LIST_HEAD(&log->no_mem_stripes);
1248
1249         INIT_LIST_HEAD(&log->no_space_stripes);
1250         spin_lock_init(&log->no_space_stripes_lock);
1251
1252         if (r5l_load_log(log))
1253                 goto error;
1254
1255         rcu_assign_pointer(conf->log, log);
1256         set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1257         return 0;
1258
1259 error:
1260         md_unregister_thread(&log->reclaim_thread);
1261 reclaim_thread:
1262         mempool_destroy(log->meta_pool);
1263 out_mempool:
1264         bioset_free(log->bs);
1265 io_bs:
1266         mempool_destroy(log->io_pool);
1267 io_pool:
1268         kmem_cache_destroy(log->io_kc);
1269 io_kc:
1270         kfree(log);
1271         return -EINVAL;
1272 }
1273
1274 void r5l_exit_log(struct r5l_log *log)
1275 {
1276         md_unregister_thread(&log->reclaim_thread);
1277         mempool_destroy(log->meta_pool);
1278         bioset_free(log->bs);
1279         mempool_destroy(log->io_pool);
1280         kmem_cache_destroy(log->io_kc);
1281         kfree(log);
1282 }