drivers/md/dm-cache-target.c
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm.h"
8 #include "dm-bio-prison.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #define DM_MSG_PREFIX "cache"
21
22 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
23         "A percentage of time allocated for copying to and/or from cache");
24
25 /*----------------------------------------------------------------*/
26
27 /*
28  * Glossary:
29  *
30  * oblock: index of an origin block
31  * cblock: index of a cache block
32  * promotion: movement of a block from origin to cache
33  * demotion: movement of a block from cache to origin
34  * migration: movement of a block between the origin and cache device,
35  *            either direction
36  */
37
38 /*----------------------------------------------------------------*/
39
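/*
 * Bitsets (dirty, discard) are stored as arrays of unsigned longs, so the
 * entry count is rounded up to a whole number of words.  For example, with
 * 64-bit longs 100 entries need dm_div_up(100, 64) = 2 longs, i.e. 16 bytes.
 */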
40 static size_t bitset_size_in_bytes(unsigned nr_entries)
41 {
42         return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
43 }
44
45 static unsigned long *alloc_bitset(unsigned nr_entries)
46 {
47         size_t s = bitset_size_in_bytes(nr_entries);
48         return vzalloc(s);
49 }
50
51 static void clear_bitset(void *bitset, unsigned nr_entries)
52 {
53         size_t s = bitset_size_in_bytes(nr_entries);
54         memset(bitset, 0, s);
55 }
56
57 static void free_bitset(unsigned long *bits)
58 {
59         vfree(bits);
60 }
61
62 /*----------------------------------------------------------------*/
63
64 /*
65  * There are a couple of places where we let a bio run, but want to do some
66  * work before calling its endio function.  We do this by temporarily
67  * changing the endio fn.
68  */
69 struct dm_hook_info {
70         bio_end_io_t *bi_end_io;
71         void *bi_private;
72 };
73
74 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
75                         bio_end_io_t *bi_end_io, void *bi_private)
76 {
77         h->bi_end_io = bio->bi_end_io;
78         h->bi_private = bio->bi_private;
79
80         bio->bi_end_io = bi_end_io;
81         bio->bi_private = bi_private;
82 }
83
84 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
85 {
86         bio->bi_end_io = h->bi_end_io;
87         bio->bi_private = h->bi_private;
88
89         /*
90          * Must bump bi_remaining to allow bio to complete with
91          * restored bi_end_io.
92          */
93         atomic_inc(&bio->bi_remaining);
94 }
95
96 /*----------------------------------------------------------------*/
97
98 #define PRISON_CELLS 1024
99 #define MIGRATION_POOL_SIZE 128
100 #define COMMIT_PERIOD HZ
101 #define MIGRATION_COUNT_WINDOW 10
102
103 /*
104  * The block size of the device holding cache data must be
105  * between 32KB and 1GB.
106  */
107 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
108 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
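/*
 * With 512 byte sectors (SECTOR_SHIFT == 9) these work out to 64 and
 * 2097152 sectors respectively.
 */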
109
110 /*
111  * FIXME: the cache is read/write for the time being.
112  */
113 enum cache_metadata_mode {
114         CM_WRITE,               /* metadata may be changed */
115         CM_READ_ONLY,           /* metadata may not be changed */
116 };
117
118 enum cache_io_mode {
119         /*
120          * Data is written to cached blocks only.  These blocks are marked
121          * dirty.  If you lose the cache device you will lose data.
122          * Potential performance increase for both reads and writes.
123          */
124         CM_IO_WRITEBACK,
125
126         /*
127          * Data is written to both cache and origin.  Blocks are never
128  * dirty.  Potential performance benefit for reads only.
129          */
130         CM_IO_WRITETHROUGH,
131
132         /*
133          * A degraded mode useful for various cache coherency situations
134          * (eg, rolling back snapshots).  Reads and writes always go to the
135          * origin.  If a write goes to a cached oblock, then the cache
136          * block is invalidated.
137          */
138         CM_IO_PASSTHROUGH
139 };
140
141 struct cache_features {
142         enum cache_metadata_mode mode;
143         enum cache_io_mode io_mode;
144 };
145
146 struct cache_stats {
147         atomic_t read_hit;
148         atomic_t read_miss;
149         atomic_t write_hit;
150         atomic_t write_miss;
151         atomic_t demotion;
152         atomic_t promotion;
153         atomic_t copies_avoided;
154         atomic_t cache_cell_clash;
155         atomic_t commit_count;
156         atomic_t discard_count;
157 };
158
159 /*
160  * Defines a range of cblocks: begin to (end - 1) are in the range; end is
161  * the one-past-the-end value.
162  */
163 struct cblock_range {
164         dm_cblock_t begin;
165         dm_cblock_t end;
166 };
167
168 struct invalidation_request {
169         struct list_head list;
170         struct cblock_range *cblocks;
171
172         atomic_t complete;
173         int err;
174
175         wait_queue_head_t result_wait;
176 };
177
178 struct cache {
179         struct dm_target *ti;
180         struct dm_target_callbacks callbacks;
181
182         struct dm_cache_metadata *cmd;
183
184         /*
185          * Metadata is written to this device.
186          */
187         struct dm_dev *metadata_dev;
188
189         /*
190          * The slower of the two data devices.  Typically a spindle.
191          */
192         struct dm_dev *origin_dev;
193
194         /*
195          * The faster of the two data devices.  Typically an SSD.
196          */
197         struct dm_dev *cache_dev;
198
199         /*
200          * Size of the origin device in _complete_ blocks and native sectors.
201          */
202         dm_oblock_t origin_blocks;
203         sector_t origin_sectors;
204
205         /*
206          * Size of the cache device in blocks.
207          */
208         dm_cblock_t cache_size;
209
210         /*
211          * Fields for converting from sectors to blocks.
212          */
213         uint32_t sectors_per_block;
214         int sectors_per_block_shift;
215
216         spinlock_t lock;
217         struct bio_list deferred_bios;
218         struct bio_list deferred_flush_bios;
219         struct bio_list deferred_writethrough_bios;
220         struct list_head quiesced_migrations;
221         struct list_head completed_migrations;
222         struct list_head need_commit_migrations;
223         sector_t migration_threshold;
224         wait_queue_head_t migration_wait;
225         atomic_t nr_migrations;
226
227         wait_queue_head_t quiescing_wait;
228         atomic_t quiescing;
229         atomic_t quiescing_ack;
230
231         /*
232          * cache_size entries, dirty if set
233          */
234         atomic_t nr_dirty;
235         unsigned long *dirty_bitset;
236
237         /*
238          * origin_blocks entries, discarded if set.
239          */
240         dm_dblock_t discard_nr_blocks;
241         unsigned long *discard_bitset;
242         uint32_t discard_block_size;
243
244         /*
245  * Rather than reconstructing the table line for the status, we just
246  * save it and regurgitate it.
247          */
248         unsigned nr_ctr_args;
249         const char **ctr_args;
250
251         struct dm_kcopyd_client *copier;
252         struct workqueue_struct *wq;
253         struct work_struct worker;
254
255         struct delayed_work waker;
256         unsigned long last_commit_jiffies;
257
258         struct dm_bio_prison *prison;
259         struct dm_deferred_set *all_io_ds;
260
261         mempool_t *migration_pool;
262         struct dm_cache_migration *next_migration;
263
264         struct dm_cache_policy *policy;
265         unsigned policy_nr_args;
266
267         bool need_tick_bio:1;
268         bool sized:1;
269         bool invalidate:1;
270         bool commit_requested:1;
271         bool loaded_mappings:1;
272         bool loaded_discards:1;
273
274         /*
275          * Cache features such as write-through.
276          */
277         struct cache_features features;
278
279         struct cache_stats stats;
280
281         /*
282          * Invalidation fields.
283          */
284         spinlock_t invalidation_lock;
285         struct list_head invalidation_requests;
286 };
287
288 struct per_bio_data {
289         bool tick:1;
290         unsigned req_nr:2;
291         struct dm_deferred_entry *all_io_entry;
292         struct dm_hook_info hook_info;
293
294         /*
295          * writethrough fields.  These MUST remain at the end of this
296          * structure and the 'cache' member must be the first as it
297          * is used to determine the offset of the writethrough fields.
298          */
299         struct cache *cache;
300         dm_cblock_t cblock;
301         struct dm_bio_details bio_details;
302 };
303
304 struct dm_cache_migration {
305         struct list_head list;
306         struct cache *cache;
307
308         unsigned long start_jiffies;
309         dm_oblock_t old_oblock;
310         dm_oblock_t new_oblock;
311         dm_cblock_t cblock;
312
313         bool err:1;
314         bool writeback:1;
315         bool demote:1;
316         bool promote:1;
317         bool requeue_holder:1;
318         bool invalidate:1;
319
320         struct dm_bio_prison_cell *old_ocell;
321         struct dm_bio_prison_cell *new_ocell;
322 };
323
324 /*
325  * Processing a bio in the worker thread may require these memory
326  * allocations.  We prealloc to avoid deadlocks (the same worker thread
327  * frees them back to the mempool).
328  */
329 struct prealloc {
330         struct dm_cache_migration *mg;
331         struct dm_bio_prison_cell *cell1;
332         struct dm_bio_prison_cell *cell2;
333 };
334
335 static void wake_worker(struct cache *cache)
336 {
337         queue_work(cache->wq, &cache->worker);
338 }
339
340 /*----------------------------------------------------------------*/
341
342 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
343 {
344         /* FIXME: change to use a local slab. */
345         return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
346 }
347
348 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
349 {
350         dm_bio_prison_free_cell(cache->prison, cell);
351 }
352
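/*
 * Top up the prealloc struct with a migration and two prison cells.
 * Returns -ENOMEM if an allocation fails; anything already allocated is
 * left in place for the next attempt.
 */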
353 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
354 {
355         if (!p->mg) {
356                 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
357                 if (!p->mg)
358                         return -ENOMEM;
359         }
360
361         if (!p->cell1) {
362                 p->cell1 = alloc_prison_cell(cache);
363                 if (!p->cell1)
364                         return -ENOMEM;
365         }
366
367         if (!p->cell2) {
368                 p->cell2 = alloc_prison_cell(cache);
369                 if (!p->cell2)
370                         return -ENOMEM;
371         }
372
373         return 0;
374 }
375
376 static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
377 {
378         if (p->cell2)
379                 free_prison_cell(cache, p->cell2);
380
381         if (p->cell1)
382                 free_prison_cell(cache, p->cell1);
383
384         if (p->mg)
385                 mempool_free(p->mg, cache->migration_pool);
386 }
387
388 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
389 {
390         struct dm_cache_migration *mg = p->mg;
391
392         BUG_ON(!mg);
393         p->mg = NULL;
394
395         return mg;
396 }
397
398 /*
399  * You must have a cell within the prealloc struct to return.  If not this
400  * function will BUG() rather than returning NULL.
401  */
402 static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
403 {
404         struct dm_bio_prison_cell *r = NULL;
405
406         if (p->cell1) {
407                 r = p->cell1;
408                 p->cell1 = NULL;
409
410         } else if (p->cell2) {
411                 r = p->cell2;
412                 p->cell2 = NULL;
413         } else
414                 BUG();
415
416         return r;
417 }
418
419 /*
420  * You can't have more than two cells in a prealloc struct.  BUG() will be
421  * called if you try to overfill it.
422  */
423 static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
424 {
425         if (!p->cell2)
426                 p->cell2 = cell;
427
428         else if (!p->cell1)
429                 p->cell1 = cell;
430
431         else
432                 BUG();
433 }
434
435 /*----------------------------------------------------------------*/
436
437 static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
438 {
439         key->virtual = 0;
440         key->dev = 0;
441         key->block = from_oblock(oblock);
442 }
443
444 /*
445  * The caller hands in a preallocated cell, and a free function for it.
446  * The cell will be freed if there's an error, or if it wasn't used because
447  * a cell with that key already exists.
448  */
449 typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
450
451 static int bio_detain(struct cache *cache, dm_oblock_t oblock,
452                       struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
453                       cell_free_fn free_fn, void *free_context,
454                       struct dm_bio_prison_cell **cell_result)
455 {
456         int r;
457         struct dm_cell_key key;
458
459         build_key(oblock, &key);
460         r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
461         if (r)
462                 free_fn(free_context, cell_prealloc);
463
464         return r;
465 }
466
467 static int get_cell(struct cache *cache,
468                     dm_oblock_t oblock,
469                     struct prealloc *structs,
470                     struct dm_bio_prison_cell **cell_result)
471 {
472         int r;
473         struct dm_cell_key key;
474         struct dm_bio_prison_cell *cell_prealloc;
475
476         cell_prealloc = prealloc_get_cell(structs);
477
478         build_key(oblock, &key);
479         r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
480         if (r)
481                 prealloc_put_cell(structs, cell_prealloc);
482
483         return r;
484 }
485
486 /*----------------------------------------------------------------*/
487
488 static bool is_dirty(struct cache *cache, dm_cblock_t b)
489 {
490         return test_bit(from_cblock(b), cache->dirty_bitset);
491 }
492
493 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
494 {
495         if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
496                 atomic_inc(&cache->nr_dirty);
497                 policy_set_dirty(cache->policy, oblock);
498         }
499 }
500
501 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
502 {
503         if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
504                 policy_clear_dirty(cache->policy, oblock);
505                 if (atomic_dec_return(&cache->nr_dirty) == 0)
506                         dm_table_event(cache->ti->table);
507         }
508 }
509
510 /*----------------------------------------------------------------*/
511
512 static bool block_size_is_power_of_two(struct cache *cache)
513 {
514         return cache->sectors_per_block_shift >= 0;
515 }
516
517 /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
518 #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
519 __always_inline
520 #endif
521 static dm_block_t block_div(dm_block_t b, uint32_t n)
522 {
523         do_div(b, n);
524
525         return b;
526 }
527
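/*
 * Convert an origin block index to a discard block index.
 * discard_block_size is held in sectors, so dividing it by the cache
 * block size gives the number of blocks covered by each discard block.
 */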
528 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
529 {
530         uint32_t discard_blocks = cache->discard_block_size;
531         dm_block_t b = from_oblock(oblock);
532
533         if (!block_size_is_power_of_two(cache))
534                 discard_blocks = discard_blocks / cache->sectors_per_block;
535         else
536                 discard_blocks >>= cache->sectors_per_block_shift;
537
538         b = block_div(b, discard_blocks);
539
540         return to_dblock(b);
541 }
542
543 static void set_discard(struct cache *cache, dm_dblock_t b)
544 {
545         unsigned long flags;
546
547         atomic_inc(&cache->stats.discard_count);
548
549         spin_lock_irqsave(&cache->lock, flags);
550         set_bit(from_dblock(b), cache->discard_bitset);
551         spin_unlock_irqrestore(&cache->lock, flags);
552 }
553
554 static void clear_discard(struct cache *cache, dm_dblock_t b)
555 {
556         unsigned long flags;
557
558         spin_lock_irqsave(&cache->lock, flags);
559         clear_bit(from_dblock(b), cache->discard_bitset);
560         spin_unlock_irqrestore(&cache->lock, flags);
561 }
562
563 static bool is_discarded(struct cache *cache, dm_dblock_t b)
564 {
565         int r;
566         unsigned long flags;
567
568         spin_lock_irqsave(&cache->lock, flags);
569         r = test_bit(from_dblock(b), cache->discard_bitset);
570         spin_unlock_irqrestore(&cache->lock, flags);
571
572         return r;
573 }
574
575 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
576 {
577         int r;
578         unsigned long flags;
579
580         spin_lock_irqsave(&cache->lock, flags);
581         r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
582                      cache->discard_bitset);
583         spin_unlock_irqrestore(&cache->lock, flags);
584
585         return r;
586 }
587
588 /*----------------------------------------------------------------*/
589
590 static void load_stats(struct cache *cache)
591 {
592         struct dm_cache_statistics stats;
593
594         dm_cache_metadata_get_stats(cache->cmd, &stats);
595         atomic_set(&cache->stats.read_hit, stats.read_hits);
596         atomic_set(&cache->stats.read_miss, stats.read_misses);
597         atomic_set(&cache->stats.write_hit, stats.write_hits);
598         atomic_set(&cache->stats.write_miss, stats.write_misses);
599 }
600
601 static void save_stats(struct cache *cache)
602 {
603         struct dm_cache_statistics stats;
604
605         stats.read_hits = atomic_read(&cache->stats.read_hit);
606         stats.read_misses = atomic_read(&cache->stats.read_miss);
607         stats.write_hits = atomic_read(&cache->stats.write_hit);
608         stats.write_misses = atomic_read(&cache->stats.write_miss);
609
610         dm_cache_metadata_set_stats(cache->cmd, &stats);
611 }
612
613 /*----------------------------------------------------------------
614  * Per bio data
615  *--------------------------------------------------------------*/
616
617 /*
618  * If using writeback, leave out struct per_bio_data's writethrough fields.
619  */
620 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
621 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
622
623 static bool writethrough_mode(struct cache_features *f)
624 {
625         return f->io_mode == CM_IO_WRITETHROUGH;
626 }
627
628 static bool writeback_mode(struct cache_features *f)
629 {
630         return f->io_mode == CM_IO_WRITEBACK;
631 }
632
633 static bool passthrough_mode(struct cache_features *f)
634 {
635         return f->io_mode == CM_IO_PASSTHROUGH;
636 }
637
638 static size_t get_per_bio_data_size(struct cache *cache)
639 {
640         return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
641 }
642
643 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
644 {
645         struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
646         BUG_ON(!pb);
647         return pb;
648 }
649
650 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
651 {
652         struct per_bio_data *pb = get_per_bio_data(bio, data_size);
653
654         pb->tick = false;
655         pb->req_nr = dm_bio_get_target_bio_nr(bio);
656         pb->all_io_entry = NULL;
657
658         return pb;
659 }
660
661 /*----------------------------------------------------------------
662  * Remapping
663  *--------------------------------------------------------------*/
664 static void remap_to_origin(struct cache *cache, struct bio *bio)
665 {
666         bio->bi_bdev = cache->origin_dev->bdev;
667 }
668
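/*
 * Remap a bio to the cache device: the cache block supplies the upper part
 * of the sector and the offset within the block is preserved.  For example,
 * with 128 sector blocks, cblock 5 and bi_sector 1000 remap to
 * (5 << 7) | (1000 & 127) = 744.
 */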
669 static void remap_to_cache(struct cache *cache, struct bio *bio,
670                            dm_cblock_t cblock)
671 {
672         sector_t bi_sector = bio->bi_iter.bi_sector;
673         sector_t block = from_cblock(cblock);
674
675         bio->bi_bdev = cache->cache_dev->bdev;
676         if (!block_size_is_power_of_two(cache))
677                 bio->bi_iter.bi_sector =
678                         (block * cache->sectors_per_block) +
679                         sector_div(bi_sector, cache->sectors_per_block);
680         else
681                 bio->bi_iter.bi_sector =
682                         (block << cache->sectors_per_block_shift) |
683                         (bi_sector & (cache->sectors_per_block - 1));
684 }
685
686 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
687 {
688         unsigned long flags;
689         size_t pb_data_size = get_per_bio_data_size(cache);
690         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
691
692         spin_lock_irqsave(&cache->lock, flags);
693         if (cache->need_tick_bio &&
694             !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
695                 pb->tick = true;
696                 cache->need_tick_bio = false;
697         }
698         spin_unlock_irqrestore(&cache->lock, flags);
699 }
700
701 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
702                                   dm_oblock_t oblock)
703 {
704         check_if_tick_bio_needed(cache, bio);
705         remap_to_origin(cache, bio);
706         if (bio_data_dir(bio) == WRITE)
707                 clear_discard(cache, oblock_to_dblock(cache, oblock));
708 }
709
710 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
711                                  dm_oblock_t oblock, dm_cblock_t cblock)
712 {
713         check_if_tick_bio_needed(cache, bio);
714         remap_to_cache(cache, bio, cblock);
715         if (bio_data_dir(bio) == WRITE) {
716                 set_dirty(cache, oblock, cblock);
717                 clear_discard(cache, oblock_to_dblock(cache, oblock));
718         }
719 }
720
721 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
722 {
723         sector_t block_nr = bio->bi_iter.bi_sector;
724
725         if (!block_size_is_power_of_two(cache))
726                 (void) sector_div(block_nr, cache->sectors_per_block);
727         else
728                 block_nr >>= cache->sectors_per_block_shift;
729
730         return to_oblock(block_nr);
731 }
732
733 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
734 {
735         return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
736 }
737
738 static void issue(struct cache *cache, struct bio *bio)
739 {
740         unsigned long flags;
741
742         if (!bio_triggers_commit(cache, bio)) {
743                 generic_make_request(bio);
744                 return;
745         }
746
747         /*
748          * Batch together any bios that trigger commits and then issue a
749          * single commit for them in do_worker().
750          */
751         spin_lock_irqsave(&cache->lock, flags);
752         cache->commit_requested = true;
753         bio_list_add(&cache->deferred_flush_bios, bio);
754         spin_unlock_irqrestore(&cache->lock, flags);
755 }
756
757 static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
758 {
759         unsigned long flags;
760
761         spin_lock_irqsave(&cache->lock, flags);
762         bio_list_add(&cache->deferred_writethrough_bios, bio);
763         spin_unlock_irqrestore(&cache->lock, flags);
764
765         wake_worker(cache);
766 }
767
768 static void writethrough_endio(struct bio *bio, int err)
769 {
770         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
771
772         dm_unhook_bio(&pb->hook_info, bio);
773
774         if (err) {
775                 bio_endio(bio, err);
776                 return;
777         }
778
779         dm_bio_restore(&pb->bio_details, bio);
780         remap_to_cache(pb->cache, bio, pb->cblock);
781
782         /*
783          * We can't issue this bio directly, since we're in interrupt
784          * context.  So it gets put on a bio list for processing by the
785          * worker thread.
786          */
787         defer_writethrough_bio(pb->cache, bio);
788 }
789
790 /*
791  * When running in writethrough mode we need to send writes to clean blocks
792  * to both the cache and origin devices.  In future we'd like to clone the
793  * bio and send them in parallel, but for now we're doing them in
794  * series as this is easier.
795  */
796 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
797                                        dm_oblock_t oblock, dm_cblock_t cblock)
798 {
799         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
800
801         pb->cache = cache;
802         pb->cblock = cblock;
803         dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
804         dm_bio_record(&pb->bio_details, bio);
805
806         remap_to_origin_clear_discard(pb->cache, bio, oblock);
807 }
808
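/*
 * The resulting path for a writethrough write to a clean, cached block:
 * remap_to_origin_then_cache() hooks the bio and points it at the origin;
 * when the origin write completes, writethrough_endio() restores the bio,
 * remaps it to the cache and defers it to the worker via
 * defer_writethrough_bio(), which then resubmits it to the cache device.
 */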
809 /*----------------------------------------------------------------
810  * Migration processing
811  *
812  * Migration covers moving data from the origin device to the cache, or
813  * vice versa.
814  *--------------------------------------------------------------*/
815 static void free_migration(struct dm_cache_migration *mg)
816 {
817         mempool_free(mg, mg->cache->migration_pool);
818 }
819
820 static void inc_nr_migrations(struct cache *cache)
821 {
822         atomic_inc(&cache->nr_migrations);
823 }
824
825 static void dec_nr_migrations(struct cache *cache)
826 {
827         atomic_dec(&cache->nr_migrations);
828
829         /*
830          * Wake the worker in case we're suspending the target.
831          */
832         wake_up(&cache->migration_wait);
833 }
834
835 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
836                          bool holder)
837 {
838         (holder ? dm_cell_release : dm_cell_release_no_holder)
839                 (cache->prison, cell, &cache->deferred_bios);
840         free_prison_cell(cache, cell);
841 }
842
843 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
844                        bool holder)
845 {
846         unsigned long flags;
847
848         spin_lock_irqsave(&cache->lock, flags);
849         __cell_defer(cache, cell, holder);
850         spin_unlock_irqrestore(&cache->lock, flags);
851
852         wake_worker(cache);
853 }
854
855 static void cleanup_migration(struct dm_cache_migration *mg)
856 {
857         struct cache *cache = mg->cache;
858         free_migration(mg);
859         dec_nr_migrations(cache);
860 }
861
862 static void migration_failure(struct dm_cache_migration *mg)
863 {
864         struct cache *cache = mg->cache;
865
866         if (mg->writeback) {
867                 DMWARN_LIMIT("writeback failed; couldn't copy block");
868                 set_dirty(cache, mg->old_oblock, mg->cblock);
869                 cell_defer(cache, mg->old_ocell, false);
870
871         } else if (mg->demote) {
872                 DMWARN_LIMIT("demotion failed; couldn't copy block");
873                 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
874
875                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
876                 if (mg->promote)
877                         cell_defer(cache, mg->new_ocell, true);
878         } else {
879                 DMWARN_LIMIT("promotion failed; couldn't copy block");
880                 policy_remove_mapping(cache->policy, mg->new_oblock);
881                 cell_defer(cache, mg->new_ocell, true);
882         }
883
884         cleanup_migration(mg);
885 }
886
887 static void migration_success_pre_commit(struct dm_cache_migration *mg)
888 {
889         unsigned long flags;
890         struct cache *cache = mg->cache;
891
892         if (mg->writeback) {
893                 clear_dirty(cache, mg->old_oblock, mg->cblock);
894                 cell_defer(cache, mg->old_ocell, false);
895                 cleanup_migration(mg);
896                 return;
897
898         } else if (mg->demote) {
899                 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
900                         DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
901                         policy_force_mapping(cache->policy, mg->new_oblock,
902                                              mg->old_oblock);
903                         if (mg->promote)
904                                 cell_defer(cache, mg->new_ocell, true);
905                         cleanup_migration(mg);
906                         return;
907                 }
908         } else {
909                 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
910                         DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
911                         policy_remove_mapping(cache->policy, mg->new_oblock);
912                         cleanup_migration(mg);
913                         return;
914                 }
915         }
916
917         spin_lock_irqsave(&cache->lock, flags);
918         list_add_tail(&mg->list, &cache->need_commit_migrations);
919         cache->commit_requested = true;
920         spin_unlock_irqrestore(&cache->lock, flags);
921 }
922
923 static void migration_success_post_commit(struct dm_cache_migration *mg)
924 {
925         unsigned long flags;
926         struct cache *cache = mg->cache;
927
928         if (mg->writeback) {
929                 DMWARN("writeback unexpectedly triggered commit");
930                 return;
931
932         } else if (mg->demote) {
933                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
934
935                 if (mg->promote) {
936                         mg->demote = false;
937
938                         spin_lock_irqsave(&cache->lock, flags);
939                         list_add_tail(&mg->list, &cache->quiesced_migrations);
940                         spin_unlock_irqrestore(&cache->lock, flags);
941
942                 } else {
943                         if (mg->invalidate)
944                                 policy_remove_mapping(cache->policy, mg->old_oblock);
945                         cleanup_migration(mg);
946                 }
947
948         } else {
949                 if (mg->requeue_holder) {
950                         clear_dirty(cache, mg->new_oblock, mg->cblock);
951                         cell_defer(cache, mg->new_ocell, true);
952                 } else {
953                         /*
954                          * The block was promoted via an overwrite, so it's dirty.
955                          */
956                         set_dirty(cache, mg->new_oblock, mg->cblock);
957                         bio_endio(mg->new_ocell->holder, 0);
958                         cell_defer(cache, mg->new_ocell, false);
959                 }
960                 cleanup_migration(mg);
961         }
962 }
963
964 static void copy_complete(int read_err, unsigned long write_err, void *context)
965 {
966         unsigned long flags;
967         struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
968         struct cache *cache = mg->cache;
969
970         if (read_err || write_err)
971                 mg->err = true;
972
973         spin_lock_irqsave(&cache->lock, flags);
974         list_add_tail(&mg->list, &cache->completed_migrations);
975         spin_unlock_irqrestore(&cache->lock, flags);
976
977         wake_worker(cache);
978 }
979
980 static void issue_copy_real(struct dm_cache_migration *mg)
981 {
982         int r;
983         struct dm_io_region o_region, c_region;
984         struct cache *cache = mg->cache;
985         sector_t cblock = from_cblock(mg->cblock);
986
987         o_region.bdev = cache->origin_dev->bdev;
988         o_region.count = cache->sectors_per_block;
989
990         c_region.bdev = cache->cache_dev->bdev;
991         c_region.sector = cblock * cache->sectors_per_block;
992         c_region.count = cache->sectors_per_block;
993
994         if (mg->writeback || mg->demote) {
995                 /* demote */
996                 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
997                 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
998         } else {
999                 /* promote */
1000                 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
1001                 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
1002         }
1003
1004         if (r < 0) {
1005                 DMERR_LIMIT("issuing migration failed");
1006                 migration_failure(mg);
1007         }
1008 }
1009
1010 static void overwrite_endio(struct bio *bio, int err)
1011 {
1012         struct dm_cache_migration *mg = bio->bi_private;
1013         struct cache *cache = mg->cache;
1014         size_t pb_data_size = get_per_bio_data_size(cache);
1015         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1016         unsigned long flags;
1017
1018         dm_unhook_bio(&pb->hook_info, bio);
1019
1020         if (err)
1021                 mg->err = true;
1022
1023         mg->requeue_holder = false;
1024
1025         spin_lock_irqsave(&cache->lock, flags);
1026         list_add_tail(&mg->list, &cache->completed_migrations);
1027         spin_unlock_irqrestore(&cache->lock, flags);
1028
1029         wake_worker(cache);
1030 }
1031
1032 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1033 {
1034         size_t pb_data_size = get_per_bio_data_size(mg->cache);
1035         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1036
1037         dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1038         remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
1039         generic_make_request(bio);
1040 }
1041
1042 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1043 {
1044         return (bio_data_dir(bio) == WRITE) &&
1045                 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1046 }
1047
1048 static void avoid_copy(struct dm_cache_migration *mg)
1049 {
1050         atomic_inc(&mg->cache->stats.copies_avoided);
1051         migration_success_pre_commit(mg);
1052 }
1053
1054 static void issue_copy(struct dm_cache_migration *mg)
1055 {
1056         bool avoid;
1057         struct cache *cache = mg->cache;
1058
1059         if (mg->writeback || mg->demote)
1060                 avoid = !is_dirty(cache, mg->cblock) ||
1061                         is_discarded_oblock(cache, mg->old_oblock);
1062         else {
1063                 struct bio *bio = mg->new_ocell->holder;
1064
1065                 avoid = is_discarded_oblock(cache, mg->new_oblock);
1066
1067                 if (writeback_mode(&cache->features) &&
1068                     !avoid && bio_writes_complete_block(cache, bio)) {
1069                         issue_overwrite(mg, bio);
1070                         return;
1071                 }
1072         }
1073
1074         avoid ? avoid_copy(mg) : issue_copy_real(mg);
1075 }
1076
1077 static void complete_migration(struct dm_cache_migration *mg)
1078 {
1079         if (mg->err)
1080                 migration_failure(mg);
1081         else
1082                 migration_success_pre_commit(mg);
1083 }
1084
1085 static void process_migrations(struct cache *cache, struct list_head *head,
1086                                void (*fn)(struct dm_cache_migration *))
1087 {
1088         unsigned long flags;
1089         struct list_head list;
1090         struct dm_cache_migration *mg, *tmp;
1091
1092         INIT_LIST_HEAD(&list);
1093         spin_lock_irqsave(&cache->lock, flags);
1094         list_splice_init(head, &list);
1095         spin_unlock_irqrestore(&cache->lock, flags);
1096
1097         list_for_each_entry_safe(mg, tmp, &list, list)
1098                 fn(mg);
1099 }
1100
1101 static void __queue_quiesced_migration(struct dm_cache_migration *mg)
1102 {
1103         list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
1104 }
1105
1106 static void queue_quiesced_migration(struct dm_cache_migration *mg)
1107 {
1108         unsigned long flags;
1109         struct cache *cache = mg->cache;
1110
1111         spin_lock_irqsave(&cache->lock, flags);
1112         __queue_quiesced_migration(mg);
1113         spin_unlock_irqrestore(&cache->lock, flags);
1114
1115         wake_worker(cache);
1116 }
1117
1118 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
1119 {
1120         unsigned long flags;
1121         struct dm_cache_migration *mg, *tmp;
1122
1123         spin_lock_irqsave(&cache->lock, flags);
1124         list_for_each_entry_safe(mg, tmp, work, list)
1125                 __queue_quiesced_migration(mg);
1126         spin_unlock_irqrestore(&cache->lock, flags);
1127
1128         wake_worker(cache);
1129 }
1130
1131 static void check_for_quiesced_migrations(struct cache *cache,
1132                                           struct per_bio_data *pb)
1133 {
1134         struct list_head work;
1135
1136         if (!pb->all_io_entry)
1137                 return;
1138
1139         INIT_LIST_HEAD(&work);
1140         if (pb->all_io_entry)
1141                 dm_deferred_entry_dec(pb->all_io_entry, &work);
1142
1143         if (!list_empty(&work))
1144                 queue_quiesced_migrations(cache, &work);
1145 }
1146
1147 static void quiesce_migration(struct dm_cache_migration *mg)
1148 {
1149         if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
1150                 queue_quiesced_migration(mg);
1151 }
1152
1153 static void promote(struct cache *cache, struct prealloc *structs,
1154                     dm_oblock_t oblock, dm_cblock_t cblock,
1155                     struct dm_bio_prison_cell *cell)
1156 {
1157         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1158
1159         mg->err = false;
1160         mg->writeback = false;
1161         mg->demote = false;
1162         mg->promote = true;
1163         mg->requeue_holder = true;
1164         mg->invalidate = false;
1165         mg->cache = cache;
1166         mg->new_oblock = oblock;
1167         mg->cblock = cblock;
1168         mg->old_ocell = NULL;
1169         mg->new_ocell = cell;
1170         mg->start_jiffies = jiffies;
1171
1172         inc_nr_migrations(cache);
1173         quiesce_migration(mg);
1174 }
1175
1176 static void writeback(struct cache *cache, struct prealloc *structs,
1177                       dm_oblock_t oblock, dm_cblock_t cblock,
1178                       struct dm_bio_prison_cell *cell)
1179 {
1180         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1181
1182         mg->err = false;
1183         mg->writeback = true;
1184         mg->demote = false;
1185         mg->promote = false;
1186         mg->requeue_holder = true;
1187         mg->invalidate = false;
1188         mg->cache = cache;
1189         mg->old_oblock = oblock;
1190         mg->cblock = cblock;
1191         mg->old_ocell = cell;
1192         mg->new_ocell = NULL;
1193         mg->start_jiffies = jiffies;
1194
1195         inc_nr_migrations(cache);
1196         quiesce_migration(mg);
1197 }
1198
1199 static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1200                                 dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1201                                 dm_cblock_t cblock,
1202                                 struct dm_bio_prison_cell *old_ocell,
1203                                 struct dm_bio_prison_cell *new_ocell)
1204 {
1205         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1206
1207         mg->err = false;
1208         mg->writeback = false;
1209         mg->demote = true;
1210         mg->promote = true;
1211         mg->requeue_holder = true;
1212         mg->invalidate = false;
1213         mg->cache = cache;
1214         mg->old_oblock = old_oblock;
1215         mg->new_oblock = new_oblock;
1216         mg->cblock = cblock;
1217         mg->old_ocell = old_ocell;
1218         mg->new_ocell = new_ocell;
1219         mg->start_jiffies = jiffies;
1220
1221         inc_nr_migrations(cache);
1222         quiesce_migration(mg);
1223 }
1224
1225 /*
1226  * Invalidate a cache entry.  No writeback occurs; any changes in the cache
1227  * block are thrown away.
1228  */
1229 static void invalidate(struct cache *cache, struct prealloc *structs,
1230                        dm_oblock_t oblock, dm_cblock_t cblock,
1231                        struct dm_bio_prison_cell *cell)
1232 {
1233         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1234
1235         mg->err = false;
1236         mg->writeback = false;
1237         mg->demote = true;
1238         mg->promote = false;
1239         mg->requeue_holder = true;
1240         mg->invalidate = true;
1241         mg->cache = cache;
1242         mg->old_oblock = oblock;
1243         mg->cblock = cblock;
1244         mg->old_ocell = cell;
1245         mg->new_ocell = NULL;
1246         mg->start_jiffies = jiffies;
1247
1248         inc_nr_migrations(cache);
1249         quiesce_migration(mg);
1250 }
1251
1252 /*----------------------------------------------------------------
1253  * bio processing
1254  *--------------------------------------------------------------*/
1255 static void defer_bio(struct cache *cache, struct bio *bio)
1256 {
1257         unsigned long flags;
1258
1259         spin_lock_irqsave(&cache->lock, flags);
1260         bio_list_add(&cache->deferred_bios, bio);
1261         spin_unlock_irqrestore(&cache->lock, flags);
1262
1263         wake_worker(cache);
1264 }
1265
1266 static void process_flush_bio(struct cache *cache, struct bio *bio)
1267 {
1268         size_t pb_data_size = get_per_bio_data_size(cache);
1269         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1270
1271         BUG_ON(bio->bi_iter.bi_size);
1272         if (!pb->req_nr)
1273                 remap_to_origin(cache, bio);
1274         else
1275                 remap_to_cache(cache, bio, 0);
1276
1277         issue(cache, bio);
1278 }
1279
1280 /*
1281  * People generally discard large parts of a device, eg, the whole device
1282  * when formatting.  Splitting these large discards up into cache block
1283  * sized ios and then quiescing (always necessary for discard) takes too
1284  * long.
1285  *
1286  * We keep it simple, and allow any size of discard to come in, and just
1287  * mark off blocks on the discard bitset.  No passdown occurs!
1288  *
1289  * To implement passdown we need to change the bio_prison such that a cell
1290  * can have a key that spans many blocks.
1291  */
1292 static void process_discard_bio(struct cache *cache, struct bio *bio)
1293 {
1294         dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1295                                                   cache->discard_block_size);
1296         dm_block_t end_block = bio_end_sector(bio);
1297         dm_block_t b;
1298
1299         end_block = block_div(end_block, cache->discard_block_size);
1300
1301         for (b = start_block; b < end_block; b++)
1302                 set_discard(cache, to_dblock(b));
1303
1304         bio_endio(bio, 0);
1305 }
1306
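/*
 * Only start another migration if the I/O it would generate, added to the
 * migrations already in flight, stays below migration_threshold (both
 * measured in sectors).
 */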
1307 static bool spare_migration_bandwidth(struct cache *cache)
1308 {
1309         sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1310                 cache->sectors_per_block;
1311         return current_volume < cache->migration_threshold;
1312 }
1313
1314 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1315 {
1316         atomic_inc(bio_data_dir(bio) == READ ?
1317                    &cache->stats.read_hit : &cache->stats.write_hit);
1318 }
1319
1320 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1321 {
1322         atomic_inc(bio_data_dir(bio) == READ ?
1323                    &cache->stats.read_miss : &cache->stats.write_miss);
1324 }
1325
1326 static void issue_cache_bio(struct cache *cache, struct bio *bio,
1327                             struct per_bio_data *pb,
1328                             dm_oblock_t oblock, dm_cblock_t cblock)
1329 {
1330         pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1331         remap_to_cache_dirty(cache, bio, oblock, cblock);
1332         issue(cache, bio);
1333 }
1334
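/*
 * Map a deferred bio according to the policy: hits are remapped to the
 * cache (or, in passthrough mode, to the origin, invalidating any cache
 * block that is written to), misses are remapped to the origin, and
 * POLICY_NEW / POLICY_REPLACE trigger promotions and demotions.
 */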
1335 static void process_bio(struct cache *cache, struct prealloc *structs,
1336                         struct bio *bio)
1337 {
1338         int r;
1339         bool release_cell = true;
1340         dm_oblock_t block = get_bio_block(cache, bio);
1341         struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1342         struct policy_result lookup_result;
1343         size_t pb_data_size = get_per_bio_data_size(cache);
1344         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1345         bool discarded_block = is_discarded_oblock(cache, block);
1346         bool passthrough = passthrough_mode(&cache->features);
1347         bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
1348
1349         /*
1350          * Check to see if that block is currently migrating.
1351          */
1352         cell_prealloc = prealloc_get_cell(structs);
1353         r = bio_detain(cache, block, bio, cell_prealloc,
1354                        (cell_free_fn) prealloc_put_cell,
1355                        structs, &new_ocell);
1356         if (r > 0)
1357                 return;
1358
1359         r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1360                        bio, &lookup_result);
1361
1362         if (r == -EWOULDBLOCK)
1363                 /* migration has been denied */
1364                 lookup_result.op = POLICY_MISS;
1365
1366         switch (lookup_result.op) {
1367         case POLICY_HIT:
1368                 if (passthrough) {
1369                         inc_miss_counter(cache, bio);
1370
1371                         /*
1372                          * Passthrough always maps to the origin,
1373                          * invalidating any cache blocks that are written
1374                          * to.
1375                          */
1376
1377                         if (bio_data_dir(bio) == WRITE) {
1378                                 atomic_inc(&cache->stats.demotion);
1379                                 invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
1380                                 release_cell = false;
1381
1382                         } else {
1383                                 /* FIXME: factor out issue_origin() */
1384                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1385                                 remap_to_origin_clear_discard(cache, bio, block);
1386                                 issue(cache, bio);
1387                         }
1388                 } else {
1389                         inc_hit_counter(cache, bio);
1390
1391                         if (bio_data_dir(bio) == WRITE &&
1392                             writethrough_mode(&cache->features) &&
1393                             !is_dirty(cache, lookup_result.cblock)) {
1394                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1395                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
1396                                 issue(cache, bio);
1397                         } else
1398                                 issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
1399                 }
1400
1401                 break;
1402
1403         case POLICY_MISS:
1404                 inc_miss_counter(cache, bio);
1405                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1406                 remap_to_origin_clear_discard(cache, bio, block);
1407                 issue(cache, bio);
1408                 break;
1409
1410         case POLICY_NEW:
1411                 atomic_inc(&cache->stats.promotion);
1412                 promote(cache, structs, block, lookup_result.cblock, new_ocell);
1413                 release_cell = false;
1414                 break;
1415
1416         case POLICY_REPLACE:
1417                 cell_prealloc = prealloc_get_cell(structs);
1418                 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1419                                (cell_free_fn) prealloc_put_cell,
1420                                structs, &old_ocell);
1421                 if (r > 0) {
1422                         /*
1423                          * We have to be careful to avoid lock inversion of
1424                          * the cells.  So we back off, and wait for the
1425                          * old_ocell to become free.
1426                          */
1427                         policy_force_mapping(cache->policy, block,
1428                                              lookup_result.old_oblock);
1429                         atomic_inc(&cache->stats.cache_cell_clash);
1430                         break;
1431                 }
1432                 atomic_inc(&cache->stats.demotion);
1433                 atomic_inc(&cache->stats.promotion);
1434
1435                 demote_then_promote(cache, structs, lookup_result.old_oblock,
1436                                     block, lookup_result.cblock,
1437                                     old_ocell, new_ocell);
1438                 release_cell = false;
1439                 break;
1440
1441         default:
1442                 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
1443                             (unsigned) lookup_result.op);
1444                 bio_io_error(bio);
1445         }
1446
1447         if (release_cell)
1448                 cell_defer(cache, new_ocell, false);
1449 }
1450
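/*
 * Commit at least once per COMMIT_PERIOD (one second); the first
 * comparison copes with jiffies wrapping around.
 */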
1451 static int need_commit_due_to_time(struct cache *cache)
1452 {
1453         return jiffies < cache->last_commit_jiffies ||
1454                jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1455 }
1456
1457 static int commit_if_needed(struct cache *cache)
1458 {
1459         int r = 0;
1460
1461         if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1462             dm_cache_changed_this_transaction(cache->cmd)) {
1463                 atomic_inc(&cache->stats.commit_count);
1464                 cache->commit_requested = false;
1465                 r = dm_cache_commit(cache->cmd, false);
1466                 cache->last_commit_jiffies = jiffies;
1467         }
1468
1469         return r;
1470 }
1471
1472 static void process_deferred_bios(struct cache *cache)
1473 {
1474         unsigned long flags;
1475         struct bio_list bios;
1476         struct bio *bio;
1477         struct prealloc structs;
1478
1479         memset(&structs, 0, sizeof(structs));
1480         bio_list_init(&bios);
1481
1482         spin_lock_irqsave(&cache->lock, flags);
1483         bio_list_merge(&bios, &cache->deferred_bios);
1484         bio_list_init(&cache->deferred_bios);
1485         spin_unlock_irqrestore(&cache->lock, flags);
1486
1487         while (!bio_list_empty(&bios)) {
1488                 /*
1489                  * If we've got no free migration structs, and processing
1490                  * this bio might require one, we pause until there are some
1491                  * prepared mappings to process.
1492                  */
1493                 if (prealloc_data_structs(cache, &structs)) {
1494                         spin_lock_irqsave(&cache->lock, flags);
1495                         bio_list_merge(&cache->deferred_bios, &bios);
1496                         spin_unlock_irqrestore(&cache->lock, flags);
1497                         break;
1498                 }
1499
1500                 bio = bio_list_pop(&bios);
1501
1502                 if (bio->bi_rw & REQ_FLUSH)
1503                         process_flush_bio(cache, bio);
1504                 else if (bio->bi_rw & REQ_DISCARD)
1505                         process_discard_bio(cache, bio);
1506                 else
1507                         process_bio(cache, &structs, bio);
1508         }
1509
1510         prealloc_free_structs(cache, &structs);
1511 }
1512
1513 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1514 {
1515         unsigned long flags;
1516         struct bio_list bios;
1517         struct bio *bio;
1518
1519         bio_list_init(&bios);
1520
1521         spin_lock_irqsave(&cache->lock, flags);
1522         bio_list_merge(&bios, &cache->deferred_flush_bios);
1523         bio_list_init(&cache->deferred_flush_bios);
1524         spin_unlock_irqrestore(&cache->lock, flags);
1525
1526         while ((bio = bio_list_pop(&bios)))
1527                 submit_bios ? generic_make_request(bio) : bio_io_error(bio);
1528 }
1529
1530 static void process_deferred_writethrough_bios(struct cache *cache)
1531 {
1532         unsigned long flags;
1533         struct bio_list bios;
1534         struct bio *bio;
1535
1536         bio_list_init(&bios);
1537
1538         spin_lock_irqsave(&cache->lock, flags);
1539         bio_list_merge(&bios, &cache->deferred_writethrough_bios);
1540         bio_list_init(&cache->deferred_writethrough_bios);
1541         spin_unlock_irqrestore(&cache->lock, flags);
1542
1543         while ((bio = bio_list_pop(&bios)))
1544                 generic_make_request(bio);
1545 }
1546
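/*
 * Ask the policy for dirty blocks to copy back to the origin while there
 * is spare migration bandwidth.  If a block's cell can't be obtained it is
 * marked dirty again in the policy and we stop.
 */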
1547 static void writeback_some_dirty_blocks(struct cache *cache)
1548 {
1549         int r = 0;
1550         dm_oblock_t oblock;
1551         dm_cblock_t cblock;
1552         struct prealloc structs;
1553         struct dm_bio_prison_cell *old_ocell;
1554
1555         memset(&structs, 0, sizeof(structs));
1556
1557         while (spare_migration_bandwidth(cache)) {
1558                 if (prealloc_data_structs(cache, &structs))
1559                         break;
1560
1561                 r = policy_writeback_work(cache->policy, &oblock, &cblock);
1562                 if (r)
1563                         break;
1564
1565                 r = get_cell(cache, oblock, &structs, &old_ocell);
1566                 if (r) {
1567                         policy_set_dirty(cache->policy, oblock);
1568                         break;
1569                 }
1570
1571                 writeback(cache, &structs, oblock, cblock, old_ocell);
1572         }
1573
1574         prealloc_free_structs(cache, &structs);
1575 }
1576
1577 /*----------------------------------------------------------------
1578  * Invalidations.
1579  * Dropping something from the cache *without* writing back.
1580  *--------------------------------------------------------------*/
1581
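/*
 * Walk the requested cblock range, removing each block from the policy
 * and then from the on-disk metadata.  -ENODATA from the policy is
 * harmless: the block simply wasn't mapped.
 */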
1582 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
1583 {
1584         int r = 0;
1585         uint64_t begin = from_cblock(req->cblocks->begin);
1586         uint64_t end = from_cblock(req->cblocks->end);
1587
1588         while (begin != end) {
1589                 r = policy_remove_cblock(cache->policy, to_cblock(begin));
1590                 if (!r) {
1591                         r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1592                         if (r)
1593                                 break;
1594
1595                 } else if (r == -ENODATA) {
1596                         /* harmless, already unmapped */
1597                         r = 0;
1598
1599                 } else {
1600                         DMERR("policy_remove_cblock failed");
1601                         break;
1602                 }
1603
1604                 begin++;
1605         }
1606
1607         cache->commit_requested = true;
1608
1609         req->err = r;
1610         atomic_set(&req->complete, 1);
1611
1612         wake_up(&req->result_wait);
1613 }
1614
1615 static void process_invalidation_requests(struct cache *cache)
1616 {
1617         struct list_head list;
1618         struct invalidation_request *req, *tmp;
1619
1620         INIT_LIST_HEAD(&list);
1621         spin_lock(&cache->invalidation_lock);
1622         list_splice_init(&cache->invalidation_requests, &list);
1623         spin_unlock(&cache->invalidation_lock);
1624
1625         list_for_each_entry_safe (req, tmp, &list, list)
1626                 process_invalidation_request(cache, req);
1627 }
1628
1629 /*----------------------------------------------------------------
1630  * Main worker loop
1631  *--------------------------------------------------------------*/
1632 static bool is_quiescing(struct cache *cache)
1633 {
1634         return atomic_read(&cache->quiescing);
1635 }
1636
1637 static void ack_quiescing(struct cache *cache)
1638 {
1639         if (is_quiescing(cache)) {
1640                 atomic_inc(&cache->quiescing_ack);
1641                 wake_up(&cache->quiescing_wait);
1642         }
1643 }
1644
1645 static void wait_for_quiescing_ack(struct cache *cache)
1646 {
1647         wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1648 }
1649
1650 static void start_quiescing(struct cache *cache)
1651 {
1652         atomic_inc(&cache->quiescing);
1653         wait_for_quiescing_ack(cache);
1654 }
1655
1656 static void stop_quiescing(struct cache *cache)
1657 {
1658         atomic_set(&cache->quiescing, 0);
1659         atomic_set(&cache->quiescing_ack, 0);
1660 }
1661
1662 static void wait_for_migrations(struct cache *cache)
1663 {
1664         wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
1665 }
1666
1667 static void stop_worker(struct cache *cache)
1668 {
1669         cancel_delayed_work(&cache->waker);
1670         flush_workqueue(cache->wq);
1671 }
1672
1673 static void requeue_deferred_io(struct cache *cache)
1674 {
1675         struct bio *bio;
1676         struct bio_list bios;
1677
1678         bio_list_init(&bios);
1679         bio_list_merge(&bios, &cache->deferred_bios);
1680         bio_list_init(&cache->deferred_bios);
1681
1682         while ((bio = bio_list_pop(&bios)))
1683                 bio_endio(bio, DM_ENDIO_REQUEUE);
1684 }
1685
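/*
 * While the cache is quiescing, the worker only drains outstanding
 * migrations; deferred bios and invalidation requests are left queued
 * until quiescing stops (see do_worker() below).
 */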
1686 static int more_work(struct cache *cache)
1687 {
1688         if (is_quiescing(cache))
1689                 return !list_empty(&cache->quiesced_migrations) ||
1690                         !list_empty(&cache->completed_migrations) ||
1691                         !list_empty(&cache->need_commit_migrations);
1692         else
1693                 return !bio_list_empty(&cache->deferred_bios) ||
1694                         !bio_list_empty(&cache->deferred_flush_bios) ||
1695                         !bio_list_empty(&cache->deferred_writethrough_bios) ||
1696                         !list_empty(&cache->quiesced_migrations) ||
1697                         !list_empty(&cache->completed_migrations) ||
1698                         !list_empty(&cache->need_commit_migrations) ||
1699                         cache->invalidate;
1700 }
1701
1702 static void do_worker(struct work_struct *ws)
1703 {
1704         struct cache *cache = container_of(ws, struct cache, worker);
1705
1706         do {
1707                 if (!is_quiescing(cache)) {
1708                         writeback_some_dirty_blocks(cache);
1709                         process_deferred_writethrough_bios(cache);
1710                         process_deferred_bios(cache);
1711                         process_invalidation_requests(cache);
1712                 }
1713
1714                 process_migrations(cache, &cache->quiesced_migrations, issue_copy);
1715                 process_migrations(cache, &cache->completed_migrations, complete_migration);
1716
1717                 if (commit_if_needed(cache)) {
1718                         process_deferred_flush_bios(cache, false);
1719
1720                         /*
1721                          * FIXME: rollback metadata or just go into a
1722                          * failure mode and error everything
1723                          */
1724                 } else {
1725                         process_deferred_flush_bios(cache, true);
1726                         process_migrations(cache, &cache->need_commit_migrations,
1727                                            migration_success_post_commit);
1728                 }
1729
1730                 ack_quiescing(cache);
1731
1732         } while (more_work(cache));
1733 }
1734
1735 /*
1736  * We want to commit periodically so that not too much
1737  * unwritten metadata builds up.
1738  */
1739 static void do_waker(struct work_struct *ws)
1740 {
1741         struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1742         policy_tick(cache->policy);
1743         wake_worker(cache);
1744         queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1745 }
1746
1747 /*----------------------------------------------------------------*/
1748
1749 static int is_congested(struct dm_dev *dev, int bdi_bits)
1750 {
1751         struct request_queue *q = bdev_get_queue(dev->bdev);
1752         return bdi_congested(&q->backing_dev_info, bdi_bits);
1753 }
1754
1755 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1756 {
1757         struct cache *cache = container_of(cb, struct cache, callbacks);
1758
1759         return is_congested(cache->origin_dev, bdi_bits) ||
1760                 is_congested(cache->cache_dev, bdi_bits);
1761 }
1762
1763 /*----------------------------------------------------------------
1764  * Target methods
1765  *--------------------------------------------------------------*/
1766
1767 /*
1768  * This function gets called on the error paths of the constructor, so we
1769  * have to cope with a partially initialised struct.
1770  */
1771 static void destroy(struct cache *cache)
1772 {
1773         unsigned i;
1774
1775         if (cache->next_migration)
1776                 mempool_free(cache->next_migration, cache->migration_pool);
1777
1778         if (cache->migration_pool)
1779                 mempool_destroy(cache->migration_pool);
1780
1781         if (cache->all_io_ds)
1782                 dm_deferred_set_destroy(cache->all_io_ds);
1783
1784         if (cache->prison)
1785                 dm_bio_prison_destroy(cache->prison);
1786
1787         if (cache->wq)
1788                 destroy_workqueue(cache->wq);
1789
1790         if (cache->dirty_bitset)
1791                 free_bitset(cache->dirty_bitset);
1792
1793         if (cache->discard_bitset)
1794                 free_bitset(cache->discard_bitset);
1795
1796         if (cache->copier)
1797                 dm_kcopyd_client_destroy(cache->copier);
1798
1799         if (cache->cmd)
1800                 dm_cache_metadata_close(cache->cmd);
1801
1802         if (cache->metadata_dev)
1803                 dm_put_device(cache->ti, cache->metadata_dev);
1804
1805         if (cache->origin_dev)
1806                 dm_put_device(cache->ti, cache->origin_dev);
1807
1808         if (cache->cache_dev)
1809                 dm_put_device(cache->ti, cache->cache_dev);
1810
1811         if (cache->policy)
1812                 dm_cache_policy_destroy(cache->policy);
1813
1814         for (i = 0; i < cache->nr_ctr_args; i++)
1815                 kfree(cache->ctr_args[i]);
1816         kfree(cache->ctr_args);
1817
1818         kfree(cache);
1819 }
1820
1821 static void cache_dtr(struct dm_target *ti)
1822 {
1823         struct cache *cache = ti->private;
1824
1825         destroy(cache);
1826 }
1827
1828 static sector_t get_dev_size(struct dm_dev *dev)
1829 {
1830         return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1831 }
1832
1833 /*----------------------------------------------------------------*/
1834
1835 /*
1836  * Construct a cache device mapping.
1837  *
1838  * cache <metadata dev> <cache dev> <origin dev> <block size>
1839  *       <#feature args> [<feature arg>]*
1840  *       <policy> <#policy args> [<policy arg>]*
1841  *
1842  * metadata dev    : fast device holding the persistent metadata
1843  * cache dev       : fast device holding cached data blocks
1844  * origin dev      : slow device holding original data blocks
1845  * block size      : cache unit size in sectors
1846  *
1847  * #feature args   : number of feature arguments passed
1848  * feature args    : writethrough or passthrough.  (The default is writeback.)
1849  *
1850  * policy          : the replacement policy to use
1851  * #policy args    : an even number of policy arguments corresponding
1852  *                   to key/value pairs passed to the policy
1853  * policy args     : key/value pairs passed to the policy
1854  *                   E.g. 'sequential_threshold 1024'
1855  *                   See cache-policies.txt for details.
1856  *
1857  * Optional feature arguments are:
1858  *   writethrough  : write through caching that prohibits cache block
1859  *                   content from being different from origin block content.
1860  *                   Without this argument, the default behaviour is to write
1861  *                   back cache block contents later for performance reasons,
1862  *                   so they may differ from the corresponding origin blocks.
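 *   passthrough   : serve reads and writes directly from the origin; a
 *                   write to a block that is present in the cache also
 *                   invalidates that cache block.  The cache must be
 *                   entirely clean before this mode can be entered.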
1863  */
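/*
 * An illustrative table line (the device names and sizes below are made-up
 * examples, not defaults):
 *
 *   0 4194304 cache /dev/mapper/fast-meta /dev/mapper/fast-data /dev/mapper/slow 512 1 writeback default 0
 *
 * i.e. cache a 4194304-sector origin using 512-sector (256KB) cache blocks,
 * explicit writeback mode and the default policy with no policy arguments.
 */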
1864 struct cache_args {
1865         struct dm_target *ti;
1866
1867         struct dm_dev *metadata_dev;
1868
1869         struct dm_dev *cache_dev;
1870         sector_t cache_sectors;
1871
1872         struct dm_dev *origin_dev;
1873         sector_t origin_sectors;
1874
1875         uint32_t block_size;
1876
1877         const char *policy_name;
1878         int policy_argc;
1879         const char **policy_argv;
1880
1881         struct cache_features features;
1882 };
1883
1884 static void destroy_cache_args(struct cache_args *ca)
1885 {
1886         if (ca->metadata_dev)
1887                 dm_put_device(ca->ti, ca->metadata_dev);
1888
1889         if (ca->cache_dev)
1890                 dm_put_device(ca->ti, ca->cache_dev);
1891
1892         if (ca->origin_dev)
1893                 dm_put_device(ca->ti, ca->origin_dev);
1894
1895         kfree(ca);
1896 }
1897
1898 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1899 {
1900         if (!as->argc) {
1901                 *error = "Insufficient args";
1902                 return false;
1903         }
1904
1905         return true;
1906 }
1907
1908 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
1909                               char **error)
1910 {
1911         int r;
1912         sector_t metadata_dev_size;
1913         char b[BDEVNAME_SIZE];
1914
1915         if (!at_least_one_arg(as, error))
1916                 return -EINVAL;
1917
1918         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1919                           &ca->metadata_dev);
1920         if (r) {
1921                 *error = "Error opening metadata device";
1922                 return r;
1923         }
1924
1925         metadata_dev_size = get_dev_size(ca->metadata_dev);
1926         if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
1927                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1928                        bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);
1929
1930         return 0;
1931 }
1932
1933 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
1934                            char **error)
1935 {
1936         int r;
1937
1938         if (!at_least_one_arg(as, error))
1939                 return -EINVAL;
1940
1941         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1942                           &ca->cache_dev);
1943         if (r) {
1944                 *error = "Error opening cache device";
1945                 return r;
1946         }
1947         ca->cache_sectors = get_dev_size(ca->cache_dev);
1948
1949         return 0;
1950 }
1951
1952 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
1953                             char **error)
1954 {
1955         int r;
1956
1957         if (!at_least_one_arg(as, error))
1958                 return -EINVAL;
1959
1960         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1961                           &ca->origin_dev);
1962         if (r) {
1963                 *error = "Error opening origin device";
1964                 return r;
1965         }
1966
1967         ca->origin_sectors = get_dev_size(ca->origin_dev);
1968         if (ca->ti->len > ca->origin_sectors) {
1969                 *error = "Device size larger than cached device";
1970                 return -EINVAL;
1971         }
1972
1973         return 0;
1974 }
1975
1976 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1977                             char **error)
1978 {
1979         unsigned long block_size;
1980
1981         if (!at_least_one_arg(as, error))
1982                 return -EINVAL;
1983
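        /*
         * Reject a value that fails to parse, is zero, lies outside the
         * supported range, or is not a multiple of
         * DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
         */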
1984         if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
1985             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1986             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1987             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1988                 *error = "Invalid data block size";
1989                 return -EINVAL;
1990         }
1991
1992         if (block_size > ca->cache_sectors) {
1993                 *error = "Data block size is larger than the cache device";
1994                 return -EINVAL;
1995         }
1996
1997         ca->block_size = block_size;
1998
1999         return 0;
2000 }
2001
2002 static void init_features(struct cache_features *cf)
2003 {
2004         cf->mode = CM_WRITE;
2005         cf->io_mode = CM_IO_WRITEBACK;
2006 }
2007
2008 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2009                           char **error)
2010 {
2011         static struct dm_arg _args[] = {
2012                 {0, 1, "Invalid number of cache feature arguments"},
2013         };
2014
2015         int r;
2016         unsigned argc;
2017         const char *arg;
2018         struct cache_features *cf = &ca->features;
2019
2020         init_features(cf);
2021
2022         r = dm_read_arg_group(_args, as, &argc, error);
2023         if (r)
2024                 return -EINVAL;
2025
2026         while (argc--) {
2027                 arg = dm_shift_arg(as);
2028
2029                 if (!strcasecmp(arg, "writeback"))
2030                         cf->io_mode = CM_IO_WRITEBACK;
2031
2032                 else if (!strcasecmp(arg, "writethrough"))
2033                         cf->io_mode = CM_IO_WRITETHROUGH;
2034
2035                 else if (!strcasecmp(arg, "passthrough"))
2036                         cf->io_mode = CM_IO_PASSTHROUGH;
2037
2038                 else {
2039                         *error = "Unrecognised cache feature requested";
2040                         return -EINVAL;
2041                 }
2042         }
2043
2044         return 0;
2045 }
2046
2047 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2048                         char **error)
2049 {
2050         static struct dm_arg _args[] = {
2051                 {0, 1024, "Invalid number of policy arguments"},
2052         };
2053
2054         int r;
2055
2056         if (!at_least_one_arg(as, error))
2057                 return -EINVAL;
2058
2059         ca->policy_name = dm_shift_arg(as);
2060
2061         r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2062         if (r)
2063                 return -EINVAL;
2064
2065         ca->policy_argv = (const char **)as->argv;
2066         dm_consume_args(as, ca->policy_argc);
2067
2068         return 0;
2069 }
2070
2071 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2072                             char **error)
2073 {
2074         int r;
2075         struct dm_arg_set as;
2076
2077         as.argc = argc;
2078         as.argv = argv;
2079
2080         r = parse_metadata_dev(ca, &as, error);
2081         if (r)
2082                 return r;
2083
2084         r = parse_cache_dev(ca, &as, error);
2085         if (r)
2086                 return r;
2087
2088         r = parse_origin_dev(ca, &as, error);
2089         if (r)
2090                 return r;
2091
2092         r = parse_block_size(ca, &as, error);
2093         if (r)
2094                 return r;
2095
2096         r = parse_features(ca, &as, error);
2097         if (r)
2098                 return r;
2099
2100         r = parse_policy(ca, &as, error);
2101         if (r)
2102                 return r;
2103
2104         return 0;
2105 }
2106
2107 /*----------------------------------------------------------------*/
2108
2109 static struct kmem_cache *migration_cache;
2110
2111 #define NOT_CORE_OPTION 1
2112
2113 static int process_config_option(struct cache *cache, const char *key, const char *value)
2114 {
2115         unsigned long tmp;
2116
2117         if (!strcasecmp(key, "migration_threshold")) {
2118                 if (kstrtoul(value, 10, &tmp))
2119                         return -EINVAL;
2120
2121                 cache->migration_threshold = tmp;
2122                 return 0;
2123         }
2124
2125         return NOT_CORE_OPTION;
2126 }
2127
2128 static int set_config_value(struct cache *cache, const char *key, const char *value)
2129 {
2130         int r = process_config_option(cache, key, value);
2131
2132         if (r == NOT_CORE_OPTION)
2133                 r = policy_set_config_value(cache->policy, key, value);
2134
2135         if (r)
2136                 DMWARN("bad config value for %s: %s", key, value);
2137
2138         return r;
2139 }
2140
2141 static int set_config_values(struct cache *cache, int argc, const char **argv)
2142 {
2143         int r = 0;
2144
2145         if (argc & 1) {
2146                 DMWARN("Odd number of policy arguments given; they should be <key> <value> pairs.");
2147                 return -EINVAL;
2148         }
2149
2150         while (argc) {
2151                 r = set_config_value(cache, argv[0], argv[1]);
2152                 if (r)
2153                         break;
2154
2155                 argc -= 2;
2156                 argv += 2;
2157         }
2158
2159         return r;
2160 }
2161
2162 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2163                                char **error)
2164 {
2165         struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2166                                                            cache->cache_size,
2167                                                            cache->origin_sectors,
2168                                                            cache->sectors_per_block);
2169         if (IS_ERR(p)) {
2170                 *error = "Error creating cache's policy";
2171                 return PTR_ERR(p);
2172         }
2173         cache->policy = p;
2174
2175         return 0;
2176 }
2177
2178 #define DEFAULT_MIGRATION_THRESHOLD 2048
2179
2180 static int cache_create(struct cache_args *ca, struct cache **result)
2181 {
2182         int r = 0;
2183         char **error = &ca->ti->error;
2184         struct cache *cache;
2185         struct dm_target *ti = ca->ti;
2186         dm_block_t origin_blocks;
2187         struct dm_cache_metadata *cmd;
2188         bool may_format = ca->features.mode == CM_WRITE;
2189
2190         cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2191         if (!cache)
2192                 return -ENOMEM;
2193
2194         cache->ti = ca->ti;
2195         ti->private = cache;
2196         ti->num_flush_bios = 2;
2197         ti->flush_supported = true;
2198
2199         ti->num_discard_bios = 1;
2200         ti->discards_supported = true;
2201         ti->discard_zeroes_data_unsupported = true;
2202         /* Discard bios must be split on a block boundary */
2203         ti->split_discard_bios = true;
2204
2205         cache->features = ca->features;
2206         ti->per_bio_data_size = get_per_bio_data_size(cache);
2207
2208         cache->callbacks.congested_fn = cache_is_congested;
2209         dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2210
2211         cache->metadata_dev = ca->metadata_dev;
2212         cache->origin_dev = ca->origin_dev;
2213         cache->cache_dev = ca->cache_dev;
2214
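        /*
         * The device references now belong to the cache; clear the
         * cache_args pointers so destroy_cache_args() won't drop them a
         * second time.
         */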
2215         ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2216
2217         /* FIXME: factor out this whole section */
2218         origin_blocks = cache->origin_sectors = ca->origin_sectors;
2219         origin_blocks = block_div(origin_blocks, ca->block_size);
2220         cache->origin_blocks = to_oblock(origin_blocks);
2221
2222         cache->sectors_per_block = ca->block_size;
2223         if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2224                 r = -EINVAL;
2225                 goto bad;
2226         }
2227
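        /*
         * With a power-of-two block size the cache size can be derived with
         * a shift; otherwise fall back to block_div() and record that by
         * setting sectors_per_block_shift to -1.
         */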
2228         if (ca->block_size & (ca->block_size - 1)) {
2229                 dm_block_t cache_size = ca->cache_sectors;
2230
2231                 cache->sectors_per_block_shift = -1;
2232                 cache_size = block_div(cache_size, ca->block_size);
2233                 cache->cache_size = to_cblock(cache_size);
2234         } else {
2235                 cache->sectors_per_block_shift = __ffs(ca->block_size);
2236                 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
2237         }
2238
2239         r = create_cache_policy(cache, ca, error);
2240         if (r)
2241                 goto bad;
2242
2243         cache->policy_nr_args = ca->policy_argc;
2244         cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2245
2246         r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2247         if (r) {
2248                 *error = "Error setting cache policy's config values";
2249                 goto bad;
2250         }
2251
2252         cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2253                                      ca->block_size, may_format,
2254                                      dm_cache_policy_get_hint_size(cache->policy));
2255         if (IS_ERR(cmd)) {
2256                 *error = "Error creating metadata object";
2257                 r = PTR_ERR(cmd);
2258                 goto bad;
2259         }
2260         cache->cmd = cmd;
2261
2262         if (passthrough_mode(&cache->features)) {
2263                 bool all_clean;
2264
2265                 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2266                 if (r) {
2267                         *error = "dm_cache_metadata_all_clean() failed";
2268                         goto bad;
2269                 }
2270
2271                 if (!all_clean) {
2272                         *error = "Cannot enter passthrough mode unless all blocks are clean";
2273                         r = -EINVAL;
2274                         goto bad;
2275                 }
2276         }
2277
2278         spin_lock_init(&cache->lock);
2279         bio_list_init(&cache->deferred_bios);
2280         bio_list_init(&cache->deferred_flush_bios);
2281         bio_list_init(&cache->deferred_writethrough_bios);
2282         INIT_LIST_HEAD(&cache->quiesced_migrations);
2283         INIT_LIST_HEAD(&cache->completed_migrations);
2284         INIT_LIST_HEAD(&cache->need_commit_migrations);
2285         atomic_set(&cache->nr_migrations, 0);
2286         init_waitqueue_head(&cache->migration_wait);
2287
2288         init_waitqueue_head(&cache->quiescing_wait);
2289         atomic_set(&cache->quiescing, 0);
2290         atomic_set(&cache->quiescing_ack, 0);
2291
2292         r = -ENOMEM;
2293         atomic_set(&cache->nr_dirty, 0);
2294         cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2295         if (!cache->dirty_bitset) {
2296                 *error = "could not allocate dirty bitset";
2297                 goto bad;
2298         }
2299         clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2300
2301         cache->discard_block_size = cache->sectors_per_block;
2302         cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
2303         cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2304         if (!cache->discard_bitset) {
2305                 *error = "could not allocate discard bitset";
2306                 goto bad;
2307         }
2308         clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2309
2310         cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2311         if (IS_ERR(cache->copier)) {
2312                 *error = "could not create kcopyd client";
2313                 r = PTR_ERR(cache->copier);
2314                 goto bad;
2315         }
2316
2317         cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2318         if (!cache->wq) {
2319                 *error = "could not create workqueue";
2320                 goto bad;
2321         }
2322         INIT_WORK(&cache->worker, do_worker);
2323         INIT_DELAYED_WORK(&cache->waker, do_waker);
2324         cache->last_commit_jiffies = jiffies;
2325
2326         cache->prison = dm_bio_prison_create(PRISON_CELLS);
2327         if (!cache->prison) {
2328                 *error = "could not create bio prison";
2329                 goto bad;
2330         }
2331
2332         cache->all_io_ds = dm_deferred_set_create();
2333         if (!cache->all_io_ds) {
2334                 *error = "could not create all_io deferred set";
2335                 goto bad;
2336         }
2337
2338         cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2339                                                          migration_cache);
2340         if (!cache->migration_pool) {
2341                 *error = "Error creating cache's migration mempool";
2342                 goto bad;
2343         }
2344
2345         cache->next_migration = NULL;
2346
2347         cache->need_tick_bio = true;
2348         cache->sized = false;
2349         cache->invalidate = false;
2350         cache->commit_requested = false;
2351         cache->loaded_mappings = false;
2352         cache->loaded_discards = false;
2353
2354         load_stats(cache);
2355
2356         atomic_set(&cache->stats.demotion, 0);
2357         atomic_set(&cache->stats.promotion, 0);
2358         atomic_set(&cache->stats.copies_avoided, 0);
2359         atomic_set(&cache->stats.cache_cell_clash, 0);
2360         atomic_set(&cache->stats.commit_count, 0);
2361         atomic_set(&cache->stats.discard_count, 0);
2362
2363         spin_lock_init(&cache->invalidation_lock);
2364         INIT_LIST_HEAD(&cache->invalidation_requests);
2365
2366         *result = cache;
2367         return 0;
2368
2369 bad:
2370         destroy(cache);
2371         return r;
2372 }
2373
2374 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2375 {
2376         unsigned i;
2377         const char **copy;
2378
2379         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2380         if (!copy)
2381                 return -ENOMEM;
2382         for (i = 0; i < argc; i++) {
2383                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2384                 if (!copy[i]) {
2385                         while (i--)
2386                                 kfree(copy[i]);
2387                         kfree(copy);
2388                         return -ENOMEM;
2389                 }
2390         }
2391
2392         cache->nr_ctr_args = argc;
2393         cache->ctr_args = copy;
2394
2395         return 0;
2396 }
2397
2398 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2399 {
2400         int r = -EINVAL;
2401         struct cache_args *ca;
2402         struct cache *cache = NULL;
2403
2404         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2405         if (!ca) {
2406                 ti->error = "Error allocating memory for cache";
2407                 return -ENOMEM;
2408         }
2409         ca->ti = ti;
2410
2411         r = parse_cache_args(ca, argc, argv, &ti->error);
2412         if (r)
2413                 goto out;
2414
2415         r = cache_create(ca, &cache);
2416         if (r)
2417                 goto out;
2418
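        /*
         * Skip the three device paths; cache_status() regenerates those from
         * the dm_dev handles when emitting the table line.
         */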
2419         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2420         if (r) {
2421                 destroy(cache);
2422                 goto out;
2423         }
2424
2425         ti->private = cache;
2426
2427 out:
2428         destroy_cache_args(ca);
2429         return r;
2430 }
2431
2432 static int cache_map(struct dm_target *ti, struct bio *bio)
2433 {
2434         struct cache *cache = ti->private;
2435
2436         int r;
2437         dm_oblock_t block = get_bio_block(cache, bio);
2438         size_t pb_data_size = get_per_bio_data_size(cache);
2439         bool can_migrate = false;
2440         bool discarded_block;
2441         struct dm_bio_prison_cell *cell;
2442         struct policy_result lookup_result;
2443         struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2444
2445         if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2446                 /*
2447                  * This can only occur if the io goes to a partial block at
2448                  * the end of the origin device.  We don't cache these.
2449                  * Just remap to the origin and carry on.
2450                  */
2451                 remap_to_origin(cache, bio);
2452                 return DM_MAPIO_REMAPPED;
2453         }
2454
2455         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2456                 defer_bio(cache, bio);
2457                 return DM_MAPIO_SUBMITTED;
2458         }
2459
2460         /*
2461          * Check to see if that block is currently migrating.
2462          */
2463         cell = alloc_prison_cell(cache);
2464         if (!cell) {
2465                 defer_bio(cache, bio);
2466                 return DM_MAPIO_SUBMITTED;
2467         }
2468
2469         r = bio_detain(cache, block, bio, cell,
2470                        (cell_free_fn) free_prison_cell,
2471                        cache, &cell);
2472         if (r) {
2473                 if (r < 0)
2474                         defer_bio(cache, bio);
2475
2476                 return DM_MAPIO_SUBMITTED;
2477         }
2478
2479         discarded_block = is_discarded_oblock(cache, block);
2480
2481         r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2482                        bio, &lookup_result);
2483         if (r == -EWOULDBLOCK) {
2484                 cell_defer(cache, cell, true);
2485                 return DM_MAPIO_SUBMITTED;
2486
2487         } else if (r) {
2488                 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2489                 bio_io_error(bio);
2490                 return DM_MAPIO_SUBMITTED;
2491         }
2492
2493         r = DM_MAPIO_REMAPPED;
2494         switch (lookup_result.op) {
2495         case POLICY_HIT:
2496                 if (passthrough_mode(&cache->features)) {
2497                         if (bio_data_dir(bio) == WRITE) {
2498                                 /*
2499                                  * We need to invalidate this block, so
2500                                  * defer for the worker thread.
2501                                  */
2502                                 cell_defer(cache, cell, true);
2503                                 r = DM_MAPIO_SUBMITTED;
2504
2505                         } else {
2506                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2507                                 inc_miss_counter(cache, bio);
2508                                 remap_to_origin_clear_discard(cache, bio, block);
2509
2510                                 cell_defer(cache, cell, false);
2511                         }
2512
2513                 } else {
2514                         inc_hit_counter(cache, bio);
2515                         pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2516
2517                         if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2518                             !is_dirty(cache, lookup_result.cblock))
2519                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2520                         else
2521                                 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
2522
2523                         cell_defer(cache, cell, false);
2524                 }
2525                 break;
2526
2527         case POLICY_MISS:
2528                 inc_miss_counter(cache, bio);
2529                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2530
2531                 if (pb->req_nr != 0) {
2532                         /*
2533                          * This is a duplicate writethrough io that is no
2534                          * longer needed because the block has been demoted.
2535                          */
2536                         bio_endio(bio, 0);
2537                         cell_defer(cache, cell, false);
2538                         return DM_MAPIO_SUBMITTED;
2539                 } else {
2540                         remap_to_origin_clear_discard(cache, bio, block);
2541                         cell_defer(cache, cell, false);
2542                 }
2543                 break;
2544
2545         default:
2546                 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2547                             (unsigned) lookup_result.op);
2548                 bio_io_error(bio);
2549                 r = DM_MAPIO_SUBMITTED;
2550         }
2551
2552         return r;
2553 }
2554
2555 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2556 {
2557         struct cache *cache = ti->private;
2558         unsigned long flags;
2559         size_t pb_data_size = get_per_bio_data_size(cache);
2560         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2561
2562         if (pb->tick) {
2563                 policy_tick(cache->policy);
2564
2565                 spin_lock_irqsave(&cache->lock, flags);
2566                 cache->need_tick_bio = true;
2567                 spin_unlock_irqrestore(&cache->lock, flags);
2568         }
2569
2570         check_for_quiesced_migrations(cache, pb);
2571
2572         return 0;
2573 }
2574
2575 static int write_dirty_bitset(struct cache *cache)
2576 {
2577         unsigned i, r;
2578
2579         for (i = 0; i < from_cblock(cache->cache_size); i++) {
2580                 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2581                                        is_dirty(cache, to_cblock(i)));
2582                 if (r)
2583                         return r;
2584         }
2585
2586         return 0;
2587 }
2588
2589 static int write_discard_bitset(struct cache *cache)
2590 {
2591         unsigned i, r;
2592
2593         r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2594                                            cache->discard_nr_blocks);
2595         if (r) {
2596                 DMERR("could not resize on-disk discard bitset");
2597                 return r;
2598         }
2599
2600         for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2601                 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2602                                          is_discarded(cache, to_dblock(i)));
2603                 if (r)
2604                         return r;
2605         }
2606
2607         return 0;
2608 }
2609
2610 /*
2611  * returns true on success
2612  */
2613 static bool sync_metadata(struct cache *cache)
2614 {
2615         int r1, r2, r3, r4;
2616
2617         r1 = write_dirty_bitset(cache);
2618         if (r1)
2619                 DMERR("could not write dirty bitset");
2620
2621         r2 = write_discard_bitset(cache);
2622         if (r2)
2623                 DMERR("could not write discard bitset");
2624
2625         save_stats(cache);
2626
2627         r3 = dm_cache_write_hints(cache->cmd, cache->policy);
2628         if (r3)
2629                 DMERR("could not write hints");
2630
2631         /*
2632          * If writing the above metadata failed, we still commit, but don't
2633          * set the clean shutdown flag.  This will effectively force every
2634          * dirty bit to be set on reload.
2635          */
2636         r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2637         if (r4)
2638                 DMERR("could not write cache metadata.  Data loss may occur.");
2639
2640         return !r1 && !r2 && !r3 && !r4;
2641 }
2642
2643 static void cache_postsuspend(struct dm_target *ti)
2644 {
2645         struct cache *cache = ti->private;
2646
2647         start_quiescing(cache);
2648         wait_for_migrations(cache);
2649         stop_worker(cache);
2650         requeue_deferred_io(cache);
2651         stop_quiescing(cache);
2652
2653         (void) sync_metadata(cache);
2654 }
2655
2656 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2657                         bool dirty, uint32_t hint, bool hint_valid)
2658 {
2659         int r;
2660         struct cache *cache = context;
2661
2662         r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2663         if (r)
2664                 return r;
2665
2666         if (dirty)
2667                 set_dirty(cache, oblock, cblock);
2668         else
2669                 clear_dirty(cache, oblock, cblock);
2670
2671         return 0;
2672 }
2673
2674 static int load_discard(void *context, sector_t discard_block_size,
2675                         dm_dblock_t dblock, bool discard)
2676 {
2677         struct cache *cache = context;
2678
2679         /* FIXME: handle mis-matched block size */
2680
2681         if (discard)
2682                 set_discard(cache, dblock);
2683         else
2684                 clear_discard(cache, dblock);
2685
2686         return 0;
2687 }
2688
2689 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2690 {
2691         sector_t size = get_dev_size(cache->cache_dev);
2692         (void) sector_div(size, cache->sectors_per_block);
2693         return to_cblock(size);
2694 }
2695
2696 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2697 {
2698         if (from_cblock(new_size) > from_cblock(cache->cache_size))
2699                 return true;
2700
2701         /*
2702          * We can't drop a dirty block when shrinking the cache.
2703          */
2704         while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2705                 if (is_dirty(cache, new_size)) {
2706                         DMERR("unable to shrink cache; cache block %llu is dirty",
2707                               (unsigned long long) from_cblock(new_size));
2708                         return false;
2709                 }
2710                 new_size = to_cblock(from_cblock(new_size) + 1);
2711         }
2712
2713         return true;
2714 }
2715
2716 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2717 {
2718         int r;
2719
2720         r = dm_cache_resize(cache->cmd, new_size);
2721         if (r) {
2722                 DMERR("could not resize cache metadata");
2723                 return r;
2724         }
2725
2726         cache->cache_size = new_size;
2727
2728         return 0;
2729 }
2730
2731 static int cache_preresume(struct dm_target *ti)
2732 {
2733         int r = 0;
2734         struct cache *cache = ti->private;
2735         dm_cblock_t csize = get_cache_dev_size(cache);
2736
2737         /*
2738          * Check to see if the cache device has been resized.
2739          */
2740         if (!cache->sized) {
2741                 r = resize_cache_dev(cache, csize);
2742                 if (r)
2743                         return r;
2744
2745                 cache->sized = true;
2746
2747         } else if (csize != cache->cache_size) {
2748                 if (!can_resize(cache, csize))
2749                         return -EINVAL;
2750
2751                 r = resize_cache_dev(cache, csize);
2752                 if (r)
2753                         return r;
2754         }
2755
2756         if (!cache->loaded_mappings) {
2757                 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2758                                            load_mapping, cache);
2759                 if (r) {
2760                         DMERR("could not load cache mappings");
2761                         return r;
2762                 }
2763
2764                 cache->loaded_mappings = true;
2765         }
2766
2767         if (!cache->loaded_discards) {
2768                 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2769                 if (r) {
2770                         DMERR("could not load origin discards");
2771                         return r;
2772                 }
2773
2774                 cache->loaded_discards = true;
2775         }
2776
2777         return r;
2778 }
2779
2780 static void cache_resume(struct dm_target *ti)
2781 {
2782         struct cache *cache = ti->private;
2783
2784         cache->need_tick_bio = true;
2785         do_waker(&cache->waker.work);
2786 }
2787
2788 /*
2789  * Status format:
2790  *
2791  * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
2792  * <cache block size> <#used cache blocks>/<#total cache blocks>
2793  * <#read hits> <#read misses> <#write hits> <#write misses>
2794  * <#demotions> <#promotions> <#dirty>
2795  * <#features> <features>*
2796  * <#core args> <core args>
2797  * <policy name> <#policy args> <policy args>*
2798  */
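/*
 * A sample STATUSTYPE_INFO line (every number below is illustrative only):
 *
 *   8 72/4096 512 318/65536 4021 227 1940 61 3 5 0 1 writeback 2 migration_threshold 2048 <policy name> <policy args>*
 *
 * The trailing fields are the policy name followed by whatever key/value
 * pairs the policy chooses to emit.
 */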
2799 static void cache_status(struct dm_target *ti, status_type_t type,
2800                          unsigned status_flags, char *result, unsigned maxlen)
2801 {
2802         int r = 0;
2803         unsigned i;
2804         ssize_t sz = 0;
2805         dm_block_t nr_free_blocks_metadata = 0;
2806         dm_block_t nr_blocks_metadata = 0;
2807         char buf[BDEVNAME_SIZE];
2808         struct cache *cache = ti->private;
2809         dm_cblock_t residency;
2810
2811         switch (type) {
2812         case STATUSTYPE_INFO:
2813                 /* Commit to ensure statistics aren't out-of-date */
2814                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2815                         r = dm_cache_commit(cache->cmd, false);
2816                         if (r)
2817                                 DMERR("could not commit metadata for accurate status");
2818                 }
2819
2820                 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2821                                                            &nr_free_blocks_metadata);
2822                 if (r) {
2823                         DMERR("could not get metadata free block count");
2824                         goto err;
2825                 }
2826
2827                 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2828                 if (r) {
2829                         DMERR("could not get metadata device size");
2830                         goto err;
2831                 }
2832
2833                 residency = policy_residency(cache->policy);
2834
2835                 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
2836                        (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
2837                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2838                        (unsigned long long)nr_blocks_metadata,
2839                        cache->sectors_per_block,
2840                        (unsigned long long) from_cblock(residency),
2841                        (unsigned long long) from_cblock(cache->cache_size),
2842                        (unsigned) atomic_read(&cache->stats.read_hit),
2843                        (unsigned) atomic_read(&cache->stats.read_miss),
2844                        (unsigned) atomic_read(&cache->stats.write_hit),
2845                        (unsigned) atomic_read(&cache->stats.write_miss),
2846                        (unsigned) atomic_read(&cache->stats.demotion),
2847                        (unsigned) atomic_read(&cache->stats.promotion),
2848                        (unsigned long) atomic_read(&cache->nr_dirty));
2849
2850                 if (writethrough_mode(&cache->features))
2851                         DMEMIT("1 writethrough ");
2852
2853                 else if (passthrough_mode(&cache->features))
2854                         DMEMIT("1 passthrough ");
2855
2856                 else if (writeback_mode(&cache->features))
2857                         DMEMIT("1 writeback ");
2858
2859                 else {
2860                         DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
2861                         goto err;
2862                 }
2863
2864                 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2865
2866                 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
2867                 if (sz < maxlen) {
2868                         r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2869                         if (r)
2870                                 DMERR("policy_emit_config_values returned %d", r);
2871                 }
2872
2873                 break;
2874
2875         case STATUSTYPE_TABLE:
2876                 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2877                 DMEMIT("%s ", buf);
2878                 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2879                 DMEMIT("%s ", buf);
2880                 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2881                 DMEMIT("%s", buf);
2882
2883                 for (i = 0; i < cache->nr_ctr_args - 1; i++)
2884                         DMEMIT(" %s", cache->ctr_args[i]);
2885                 if (cache->nr_ctr_args)
2886                         DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2887         }
2888
2889         return;
2890
2891 err:
2892         DMEMIT("Error");
2893 }
2894
2895 /*
2896  * A cache block range can take two forms:
2897  *
2898  * i) A single cblock, eg. '3456'
2899  * ii) A begin and end cblock with a hyphen between, eg. 123-234
2900  */
2901 static int parse_cblock_range(struct cache *cache, const char *str,
2902                               struct cblock_range *result)
2903 {
2904         char dummy;
2905         uint64_t b, e;
2906         int r;
2907
2908         /*
2909          * Try and parse form (ii) first.
2910          */
2911         r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
2912         if (r < 0)
2913                 return r;
2914
2915         if (r == 2) {
2916                 result->begin = to_cblock(b);
2917                 result->end = to_cblock(e);
2918                 return 0;
2919         }
2920
2921         /*
2922          * That didn't work, try form (i).
2923          */
2924         r = sscanf(str, "%llu%c", &b, &dummy);
2925         if (r < 0)
2926                 return r;
2927
2928         if (r == 1) {
2929                 result->begin = to_cblock(b);
2930                 result->end = to_cblock(from_cblock(result->begin) + 1u);
2931                 return 0;
2932         }
2933
2934         DMERR("invalid cblock range '%s'", str);
2935         return -EINVAL;
2936 }
2937
2938 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
2939 {
2940         uint64_t b = from_cblock(range->begin);
2941         uint64_t e = from_cblock(range->end);
2942         uint64_t n = from_cblock(cache->cache_size);
2943
2944         if (b >= n) {
2945                 DMERR("begin cblock out of range: %llu >= %llu", b, n);
2946                 return -EINVAL;
2947         }
2948
2949         if (e > n) {
2950                 DMERR("end cblock out of range: %llu > %llu", e, n);
2951                 return -EINVAL;
2952         }
2953
2954         if (b >= e) {
2955                 DMERR("invalid cblock range: %llu >= %llu", b, e);
2956                 return -EINVAL;
2957         }
2958
2959         return 0;
2960 }
2961
2962 static int request_invalidation(struct cache *cache, struct cblock_range *range)
2963 {
2964         struct invalidation_request req;
2965
2966         INIT_LIST_HEAD(&req.list);
2967         req.cblocks = range;
2968         atomic_set(&req.complete, 0);
2969         req.err = 0;
2970         init_waitqueue_head(&req.result_wait);
2971
2972         spin_lock(&cache->invalidation_lock);
2973         list_add(&req.list, &cache->invalidation_requests);
2974         spin_unlock(&cache->invalidation_lock);
2975         wake_worker(cache);
2976
2977         wait_event(req.result_wait, atomic_read(&req.complete));
2978         return req.err;
2979 }
2980
2981 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
2982                                               const char **cblock_ranges)
2983 {
2984         int r = 0;
2985         unsigned i;
2986         struct cblock_range range;
2987
2988         if (!passthrough_mode(&cache->features)) {
2989                 DMERR("cache has to be in passthrough mode for invalidation");
2990                 return -EPERM;
2991         }
2992
2993         for (i = 0; i < count; i++) {
2994                 r = parse_cblock_range(cache, cblock_ranges[i], &range);
2995                 if (r)
2996                         break;
2997
2998                 r = validate_cblock_range(cache, &range);
2999                 if (r)
3000                         break;
3001
3002                 /*
3003                  * Pass begin and end origin blocks to the worker and wake it.
3004                  */
3005                 r = request_invalidation(cache, &range);
3006                 if (r)
3007                         break;
3008         }
3009
3010         return r;
3011 }
3012
3013 /*
3014  * Supports
3015  *      "<key> <value>"
3016  * and
3017  *      "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
3018  *
3019  * The key migration_threshold is supported by the cache target core.
3020  */
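/*
 * Example invocations (the device name "my-cache" is hypothetical, and
 * invalidate_cblocks additionally requires passthrough mode):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *   dmsetup message my-cache 0 invalidate_cblocks 500 1000-2000
 */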
3021 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
3022 {
3023         struct cache *cache = ti->private;
3024
3025         if (!argc)
3026                 return -EINVAL;
3027
3028         if (!strcasecmp(argv[0], "invalidate_cblocks"))
3029                 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3030
3031         if (argc != 2)
3032                 return -EINVAL;
3033
3034         return set_config_value(cache, argv[0], argv[1]);
3035 }
3036
3037 static int cache_iterate_devices(struct dm_target *ti,
3038                                  iterate_devices_callout_fn fn, void *data)
3039 {
3040         int r = 0;
3041         struct cache *cache = ti->private;
3042
3043         r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3044         if (!r)
3045                 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3046
3047         return r;
3048 }
3049
3050 /*
3051  * We assume I/O is going to the origin (which is the volume
3052  * more likely to have restrictions e.g. by being striped).
3053  * (Looking up the exact location of the data would be expensive
3054  * and could always be out of date by the time the bio is submitted.)
3055  */
3056 static int cache_bvec_merge(struct dm_target *ti,
3057                             struct bvec_merge_data *bvm,
3058                             struct bio_vec *biovec, int max_size)
3059 {
3060         struct cache *cache = ti->private;
3061         struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
3062
3063         if (!q->merge_bvec_fn)
3064                 return max_size;
3065
3066         bvm->bi_bdev = cache->origin_dev->bdev;
3067         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3068 }
3069
3070 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3071 {
3072         /*
3073          * FIXME: these limits may be incompatible with the cache device
3074          */
3075         limits->max_discard_sectors = cache->discard_block_size;
3076         limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3077 }
3078
3079 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3080 {
3081         struct cache *cache = ti->private;
3082         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3083
3084         /*
3085          * If the system-determined stacked limits are compatible with the
3086          * cache's blocksize (io_opt is a factor) do not override them.
3087          */
3088         if (io_opt_sectors < cache->sectors_per_block ||
3089             do_div(io_opt_sectors, cache->sectors_per_block)) {
3090                 blk_limits_io_min(limits, 0);
3091                 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3092         }
3093         set_discard_limits(cache, limits);
3094 }
3095
3096 /*----------------------------------------------------------------*/
3097
3098 static struct target_type cache_target = {
3099         .name = "cache",
3100         .version = {1, 4, 0},
3101         .module = THIS_MODULE,
3102         .ctr = cache_ctr,
3103         .dtr = cache_dtr,
3104         .map = cache_map,
3105         .end_io = cache_end_io,
3106         .postsuspend = cache_postsuspend,
3107         .preresume = cache_preresume,
3108         .resume = cache_resume,
3109         .status = cache_status,
3110         .message = cache_message,
3111         .iterate_devices = cache_iterate_devices,
3112         .merge = cache_bvec_merge,
3113         .io_hints = cache_io_hints,
3114 };
3115
3116 static int __init dm_cache_init(void)
3117 {
3118         int r;
3119
3120         r = dm_register_target(&cache_target);
3121         if (r) {
3122                 DMERR("cache target registration failed: %d", r);
3123                 return r;
3124         }
3125
3126         migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3127         if (!migration_cache) {
3128                 dm_unregister_target(&cache_target);
3129                 return -ENOMEM;
3130         }
3131
3132         return 0;
3133 }
3134
3135 static void __exit dm_cache_exit(void)
3136 {
3137         dm_unregister_target(&cache_target);
3138         kmem_cache_destroy(migration_cache);
3139 }
3140
3141 module_init(dm_cache_init);
3142 module_exit(dm_cache_exit);
3143
3144 MODULE_DESCRIPTION(DM_NAME " cache target");
3145 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3146 MODULE_LICENSE("GPL");