2 * Copyright (C) 2011-2012 Red Hat UK.
4 * This file is released under the GPL.
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison-v1.h"
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/jiffies.h>
15 #include <linux/log2.h>
16 #include <linux/list.h>
17 #include <linux/rculist.h>
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/sort.h>
23 #include <linux/rbtree.h>
25 #define DM_MSG_PREFIX "thin"
30 #define ENDIO_HOOK_POOL_SIZE 1024
31 #define MAPPING_POOL_SIZE 1024
32 #define COMMIT_PERIOD HZ
33 #define NO_SPACE_TIMEOUT_SECS 60
35 static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
37 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
38 "A percentage of time allocated for copy on write");
41 * The block size of the device holding pool data must be
42 * between 64KB and 1GB.
44 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
45 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
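/*
 * A quick sanity check on the two limits above (illustrative arithmetic,
 * assuming the usual 512-byte sectors, i.e. SECTOR_SHIFT == 9):
 *
 *	64KB = (64 * 1024) >> 9          = 128 sectors
 *	1GB  = (1024 * 1024 * 1024) >> 9 = 2097152 sectors
 */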
48 * Device id is restricted to 24 bits.
50 #define MAX_DEV_ID ((1 << 24) - 1)
53 * How do we handle breaking sharing of data blocks?
54 * =================================================
56 * We use a standard copy-on-write btree to store the mappings for the
57 * devices (note I'm talking about copy-on-write of the metadata here, not
58 * the data). When you take an internal snapshot you clone the root node
59 * of the origin btree. After this there is no concept of an origin or a
60 * snapshot. They are just two device trees that happen to point to the
63 * When we get a write in we decide if it's to a shared data block using
64 * some timestamp magic. If it is, we have to break sharing.
66 * Let's say we write to a shared block in what was the origin. The
69 * i) plug io further to this physical block. (see bio_prison code).
71 * ii) quiesce any read io to that shared data block. Obviously
72 * including all devices that share this block. (see dm_deferred_set code)
74 * iii) copy the data block to a newly allocated block. This step can be
75 * skipped if the io covers the whole block. (schedule_copy).
77 * iv) insert the new mapping into the origin's btree
78 * (process_prepared_mapping). This act of inserting breaks some
79 * sharing of btree nodes between the two devices. Breaking sharing only
80 * affects the btree of that specific device. Btrees for the other
81 * devices that share the block never change. The btree for the origin
82 * device as it was after the last commit is untouched, i.e. we're using
83 * persistent data structures in the functional programming sense.
85 * v) unplug io to this physical block, including the io that triggered
86 * the breaking of sharing.
88 * Steps (ii) and (iii) occur in parallel.
90 * The metadata _doesn't_ need to be committed before the io continues. We
91 * get away with this because the io is always written to a _new_ block.
92 * If there's a crash, then:
94 * - The origin mapping will point to the old origin block (the shared
95 * one). This will contain the data as it was before the io that triggered
96 * the breaking of sharing came in.
98 * - The snap mapping still points to the old block. As it would after
101 * The downside of this scheme is that the timestamp magic isn't perfect:
102 * it will continue to think the data block in the snapshot device is shared
103 * even after the write to the origin has broken sharing. I suspect data
104 * blocks will typically be shared by many different devices, so we're
105 * breaking sharing n + 1 times, rather than n, where n is the number of
106 * devices that reference this data block. At the moment I think the
107 * benefits far, far outweigh the disadvantages.
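 * To make that downside concrete (an illustration only, not something the
 * code relies on): take an origin and one snapshot sharing a data block.
 * A write to the origin breaks sharing, so the snapshot is now the only
 * device referencing the old block.  Because the timestamp heuristic still
 * reports that old block as shared, a later write to the snapshot triggers
 * one more copy than was strictly necessary - that is the extra break
 * counted above.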
110 /*----------------------------------------------------------------*/
120 static void build_key(struct dm_thin_device *td, enum lock_space ls,
121 dm_block_t b, dm_block_t e, struct dm_cell_key *key)
123 key->virtual = (ls == VIRTUAL);
124 key->dev = dm_thin_dev_id(td);
125 key->block_begin = b;
129 static void build_data_key(struct dm_thin_device *td, dm_block_t b,
130 struct dm_cell_key *key)
132 build_key(td, PHYSICAL, b, b + 1llu, key);
135 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
136 struct dm_cell_key *key)
138 build_key(td, VIRTUAL, b, b + 1llu, key);
141 /*----------------------------------------------------------------*/
143 #define THROTTLE_THRESHOLD (1 * HZ)
146 struct rw_semaphore lock;
147 unsigned long threshold;
148 bool throttle_applied;
151 static void throttle_init(struct throttle *t)
153 init_rwsem(&t->lock);
154 t->throttle_applied = false;
157 static void throttle_work_start(struct throttle *t)
159 t->threshold = jiffies + THROTTLE_THRESHOLD;
162 static void throttle_work_update(struct throttle *t)
164 if (!t->throttle_applied && jiffies > t->threshold) {
165 down_write(&t->lock);
166 t->throttle_applied = true;
170 static void throttle_work_complete(struct throttle *t)
172 if (t->throttle_applied) {
173 t->throttle_applied = false;
178 static void throttle_lock(struct throttle *t)
183 static void throttle_unlock(struct throttle *t)
188 /*----------------------------------------------------------------*/
191 * A pool device ties together a metadata device and a data device. It
192 * also provides the interface for creating and destroying internal
195 struct dm_thin_new_mapping;
198 * The pool runs in 4 modes, ordered by increasing degradation so that modes can be compared directly.
201 PM_WRITE, /* metadata may be changed */
202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
203 PM_READ_ONLY, /* metadata may not be changed */
204 PM_FAIL, /* all I/O fails */
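/*
 * The ordering above is what allows the mode to be range-checked rather than
 * compared against each value individually; for example commit() below bails
 * out early with "if (get_pool_mode(pool) >= PM_READ_ONLY)", which covers
 * both PM_READ_ONLY and PM_FAIL.
 */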
207 struct pool_features {
210 bool zero_new_blocks:1;
211 bool discard_enabled:1;
212 bool discard_passdown:1;
213 bool error_if_no_space:1;
217 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
218 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
219 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
221 #define CELL_SORT_ARRAY_SIZE 8192
224 struct list_head list;
225 struct dm_target *ti; /* Only set if a pool target is bound */
227 struct mapped_device *pool_md;
228 struct block_device *md_dev;
229 struct dm_pool_metadata *pmd;
231 dm_block_t low_water_blocks;
232 uint32_t sectors_per_block;
233 int sectors_per_block_shift;
235 struct pool_features pf;
236 bool low_water_triggered:1; /* A dm event has been sent */
238 bool out_of_data_space:1;
240 struct dm_bio_prison *prison;
241 struct dm_kcopyd_client *copier;
243 struct workqueue_struct *wq;
244 struct throttle throttle;
245 struct work_struct worker;
246 struct delayed_work waker;
247 struct delayed_work no_space_timeout;
249 unsigned long last_commit_jiffies;
253 struct bio_list deferred_flush_bios;
254 struct list_head prepared_mappings;
255 struct list_head prepared_discards;
256 struct list_head prepared_discards_pt2;
257 struct list_head active_thins;
259 struct dm_deferred_set *shared_read_ds;
260 struct dm_deferred_set *all_io_ds;
262 struct dm_thin_new_mapping *next_mapping;
263 mempool_t *mapping_pool;
265 process_bio_fn process_bio;
266 process_bio_fn process_discard;
268 process_cell_fn process_cell;
269 process_cell_fn process_discard_cell;
271 process_mapping_fn process_prepared_mapping;
272 process_mapping_fn process_prepared_discard;
273 process_mapping_fn process_prepared_discard_pt2;
275 struct dm_bio_prison_cell **cell_sort_array;
278 static enum pool_mode get_pool_mode(struct pool *pool);
279 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
282 * Target context for a pool.
285 struct dm_target *ti;
287 struct dm_dev *data_dev;
288 struct dm_dev *metadata_dev;
289 struct dm_target_callbacks callbacks;
291 dm_block_t low_water_blocks;
292 struct pool_features requested_pf; /* Features requested during table load */
293 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
297 * Target context for a thin.
300 struct list_head list;
301 struct dm_dev *pool_dev;
302 struct dm_dev *origin_dev;
303 sector_t origin_size;
307 struct dm_thin_device *td;
308 struct mapped_device *thin_md;
312 struct list_head deferred_cells;
313 struct bio_list deferred_bio_list;
314 struct bio_list retry_on_resume_list;
315 struct rb_root sort_bio_list; /* sorted list of deferred bios */
318 * Ensures the thin is not destroyed until the worker has finished
319 * iterating the active_thins list.
322 struct completion can_destroy;
325 /*----------------------------------------------------------------*/
327 static bool block_size_is_power_of_two(struct pool *pool)
329 return pool->sectors_per_block_shift >= 0;
332 static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
334 return block_size_is_power_of_two(pool) ?
335 (b << pool->sectors_per_block_shift) :
336 (b * pool->sectors_per_block);
339 /*----------------------------------------------------------------*/
343 struct blk_plug plug;
344 struct bio *parent_bio;
348 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
353 blk_start_plug(&op->plug);
354 op->parent_bio = parent;
358 static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
360 struct thin_c *tc = op->tc;
361 sector_t s = block_to_sectors(tc->pool, data_b);
362 sector_t len = block_to_sectors(tc->pool, data_e - data_b);
364 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
365 GFP_NOWAIT, 0, &op->bio);
368 static void end_discard(struct discard_op *op, int r)
372 * Even if one of the calls to issue_discard failed, we
373 * need to wait for the chain to complete.
375 bio_chain(op->bio, op->parent_bio);
376 bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
380 blk_finish_plug(&op->plug);
383 * Even if r is set, there could be sub discards in flight that we
386 if (r && !op->parent_bio->bi_error)
387 op->parent_bio->bi_error = r;
388 bio_endio(op->parent_bio);
391 /*----------------------------------------------------------------*/
394 * wake_worker() is used when new work is queued and when pool_resume is
395 * ready to continue deferred IO processing.
397 static void wake_worker(struct pool *pool)
399 queue_work(pool->wq, &pool->worker);
402 /*----------------------------------------------------------------*/
404 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
405 struct dm_bio_prison_cell **cell_result)
408 struct dm_bio_prison_cell *cell_prealloc;
411 * Allocate a cell from the prison's mempool.
412 * This might block but it can't fail.
414 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
416 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
419 * We reused an old cell; we can get rid of
422 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
427 static void cell_release(struct pool *pool,
428 struct dm_bio_prison_cell *cell,
429 struct bio_list *bios)
431 dm_cell_release(pool->prison, cell, bios);
432 dm_bio_prison_free_cell(pool->prison, cell);
435 static void cell_visit_release(struct pool *pool,
436 void (*fn)(void *, struct dm_bio_prison_cell *),
438 struct dm_bio_prison_cell *cell)
440 dm_cell_visit_release(pool->prison, fn, context, cell);
441 dm_bio_prison_free_cell(pool->prison, cell);
444 static void cell_release_no_holder(struct pool *pool,
445 struct dm_bio_prison_cell *cell,
446 struct bio_list *bios)
448 dm_cell_release_no_holder(pool->prison, cell, bios);
449 dm_bio_prison_free_cell(pool->prison, cell);
452 static void cell_error_with_code(struct pool *pool,
453 struct dm_bio_prison_cell *cell, int error_code)
455 dm_cell_error(pool->prison, cell, error_code);
456 dm_bio_prison_free_cell(pool->prison, cell);
459 static int get_pool_io_error_code(struct pool *pool)
461 return pool->out_of_data_space ? -ENOSPC : -EIO;
464 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
466 int error = get_pool_io_error_code(pool);
468 cell_error_with_code(pool, cell, error);
471 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
473 cell_error_with_code(pool, cell, 0);
476 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
478 cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
481 /*----------------------------------------------------------------*/
484 * A global list of pools that uses a struct mapped_device as a key.
486 static struct dm_thin_pool_table {
488 struct list_head pools;
489 } dm_thin_pool_table;
491 static void pool_table_init(void)
493 mutex_init(&dm_thin_pool_table.mutex);
494 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
497 static void __pool_table_insert(struct pool *pool)
499 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
500 list_add(&pool->list, &dm_thin_pool_table.pools);
503 static void __pool_table_remove(struct pool *pool)
505 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
506 list_del(&pool->list);
509 static struct pool *__pool_table_lookup(struct mapped_device *md)
511 struct pool *pool = NULL, *tmp;
513 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
515 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
516 if (tmp->pool_md == md) {
525 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
527 struct pool *pool = NULL, *tmp;
529 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
531 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
532 if (tmp->md_dev == md_dev) {
541 /*----------------------------------------------------------------*/
543 struct dm_thin_endio_hook {
545 struct dm_deferred_entry *shared_read_entry;
546 struct dm_deferred_entry *all_io_entry;
547 struct dm_thin_new_mapping *overwrite_mapping;
548 struct rb_node rb_node;
549 struct dm_bio_prison_cell *cell;
552 static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
554 bio_list_merge(bios, master);
555 bio_list_init(master);
558 static void error_bio_list(struct bio_list *bios, int error)
562 while ((bio = bio_list_pop(bios))) {
563 bio->bi_error = error;
568 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
570 struct bio_list bios;
573 bio_list_init(&bios);
575 spin_lock_irqsave(&tc->lock, flags);
576 __merge_bio_list(&bios, master);
577 spin_unlock_irqrestore(&tc->lock, flags);
579 error_bio_list(&bios, error);
582 static void requeue_deferred_cells(struct thin_c *tc)
584 struct pool *pool = tc->pool;
586 struct list_head cells;
587 struct dm_bio_prison_cell *cell, *tmp;
589 INIT_LIST_HEAD(&cells);
591 spin_lock_irqsave(&tc->lock, flags);
592 list_splice_init(&tc->deferred_cells, &cells);
593 spin_unlock_irqrestore(&tc->lock, flags);
595 list_for_each_entry_safe(cell, tmp, &cells, user_list)
596 cell_requeue(pool, cell);
599 static void requeue_io(struct thin_c *tc)
601 struct bio_list bios;
604 bio_list_init(&bios);
606 spin_lock_irqsave(&tc->lock, flags);
607 __merge_bio_list(&bios, &tc->deferred_bio_list);
608 __merge_bio_list(&bios, &tc->retry_on_resume_list);
609 spin_unlock_irqrestore(&tc->lock, flags);
611 error_bio_list(&bios, DM_ENDIO_REQUEUE);
612 requeue_deferred_cells(tc);
615 static void error_retry_list_with_code(struct pool *pool, int error)
620 list_for_each_entry_rcu(tc, &pool->active_thins, list)
621 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
625 static void error_retry_list(struct pool *pool)
627 int error = get_pool_io_error_code(pool);
629 error_retry_list_with_code(pool, error);
633 * This section of code contains the logic for processing a thin device's IO.
634 * Much of the code depends on pool object resources (lists, workqueues, etc)
635 * but most is exclusively called from the thin target rather than the thin-pool
639 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
641 struct pool *pool = tc->pool;
642 sector_t block_nr = bio->bi_iter.bi_sector;
644 if (block_size_is_power_of_two(pool))
645 block_nr >>= pool->sectors_per_block_shift;
647 (void) sector_div(block_nr, pool->sectors_per_block);
653 * Returns the _complete_ blocks that this bio covers.
655 static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
656 dm_block_t *begin, dm_block_t *end)
658 struct pool *pool = tc->pool;
659 sector_t b = bio->bi_iter.bi_sector;
660 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
662 b += pool->sectors_per_block - 1ull; /* so we round up */
664 if (block_size_is_power_of_two(pool)) {
665 b >>= pool->sectors_per_block_shift;
666 e >>= pool->sectors_per_block_shift;
668 (void) sector_div(b, pool->sectors_per_block);
669 (void) sector_div(e, pool->sectors_per_block);
673 /* Can happen if the bio is within a single block. */
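/*
 * Worked example (illustrative numbers only): with 128-sector blocks, a bio
 * covering sectors [200, 520) gives begin = (200 + 127) / 128 = 2 and
 * end = 520 / 128 = 4, i.e. only blocks 2 and 3 are completely covered;
 * the partially covered blocks 1 and 4 are excluded.
 */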
680 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
682 struct pool *pool = tc->pool;
683 sector_t bi_sector = bio->bi_iter.bi_sector;
685 bio->bi_bdev = tc->pool_dev->bdev;
686 if (block_size_is_power_of_two(pool))
687 bio->bi_iter.bi_sector =
688 (block << pool->sectors_per_block_shift) |
689 (bi_sector & (pool->sectors_per_block - 1));
691 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
692 sector_div(bi_sector, pool->sectors_per_block);
695 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
697 bio->bi_bdev = tc->origin_dev->bdev;
700 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
702 return op_is_flush(bio->bi_opf) &&
703 dm_thin_changed_this_transaction(tc->td);
706 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
708 struct dm_thin_endio_hook *h;
710 if (bio_op(bio) == REQ_OP_DISCARD)
713 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
714 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
717 static void issue(struct thin_c *tc, struct bio *bio)
719 struct pool *pool = tc->pool;
722 if (!bio_triggers_commit(tc, bio)) {
723 generic_make_request(bio);
728 * Complete bio with an error if earlier I/O caused changes to
729 * the metadata that can't be committed, e.g. due to I/O errors
730 * on the metadata device.
732 if (dm_thin_aborted_changes(tc->td)) {
738 * Batch together any bios that trigger commits and then issue a
739 * single commit for them in process_deferred_bios().
741 spin_lock_irqsave(&pool->lock, flags);
742 bio_list_add(&pool->deferred_flush_bios, bio);
743 spin_unlock_irqrestore(&pool->lock, flags);
746 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
748 remap_to_origin(tc, bio);
752 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
755 remap(tc, bio, block);
759 /*----------------------------------------------------------------*/
762 * Bio endio functions.
764 struct dm_thin_new_mapping {
765 struct list_head list;
771 * Track quiescing, copying and zeroing preparation actions. When this
772 * counter hits zero the block is prepared and can be inserted into the
775 atomic_t prepare_actions;
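/*
 * As an example of how this counter is used: schedule_copy() below starts
 * it at 3 (quiesce + copy + a reference held by the function itself), and
 * each completed action drops it via complete_mapping_preparation().
 */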
779 dm_block_t virt_begin, virt_end;
780 dm_block_t data_block;
781 struct dm_bio_prison_cell *cell;
784 * If the bio covers the whole area of a block then we can avoid
785 * zeroing or copying. Instead this bio is hooked. The bio will
786 * still be in the cell, so care has to be taken to avoid issuing
790 bio_end_io_t *saved_bi_end_io;
793 static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
795 struct pool *pool = m->tc->pool;
797 if (atomic_dec_and_test(&m->prepare_actions)) {
798 list_add_tail(&m->list, &pool->prepared_mappings);
803 static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
806 struct pool *pool = m->tc->pool;
808 spin_lock_irqsave(&pool->lock, flags);
809 __complete_mapping_preparation(m);
810 spin_unlock_irqrestore(&pool->lock, flags);
813 static void copy_complete(int read_err, unsigned long write_err, void *context)
815 struct dm_thin_new_mapping *m = context;
817 m->err = read_err || write_err ? -EIO : 0;
818 complete_mapping_preparation(m);
821 static void overwrite_endio(struct bio *bio)
823 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
824 struct dm_thin_new_mapping *m = h->overwrite_mapping;
826 bio->bi_end_io = m->saved_bi_end_io;
828 m->err = bio->bi_error;
829 complete_mapping_preparation(m);
832 /*----------------------------------------------------------------*/
839 * Prepared mapping jobs.
843 * This sends the bios in the cell, except the original holder, back
844 * to the deferred_bios list.
846 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
848 struct pool *pool = tc->pool;
851 spin_lock_irqsave(&tc->lock, flags);
852 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
853 spin_unlock_irqrestore(&tc->lock, flags);
858 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
862 struct bio_list defer_bios;
863 struct bio_list issue_bios;
866 static void __inc_remap_and_issue_cell(void *context,
867 struct dm_bio_prison_cell *cell)
869 struct remap_info *info = context;
872 while ((bio = bio_list_pop(&cell->bios))) {
873 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
874 bio_list_add(&info->defer_bios, bio);
876 inc_all_io_entry(info->tc->pool, bio);
879 * We can't issue the bios with the bio prison lock
880 * held, so we add them to a list to issue on
881 * return from this function.
883 bio_list_add(&info->issue_bios, bio);
888 static void inc_remap_and_issue_cell(struct thin_c *tc,
889 struct dm_bio_prison_cell *cell,
893 struct remap_info info;
896 bio_list_init(&info.defer_bios);
897 bio_list_init(&info.issue_bios);
900 * We have to be careful to inc any bios we're about to issue
901 * before the cell is released, and avoid a race with new bios
902 * being added to the cell.
904 cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
907 while ((bio = bio_list_pop(&info.defer_bios)))
908 thin_defer_bio(tc, bio);
910 while ((bio = bio_list_pop(&info.issue_bios)))
911 remap_and_issue(info.tc, bio, block);
914 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
916 cell_error(m->tc->pool, m->cell);
918 mempool_free(m, m->tc->pool->mapping_pool);
921 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
923 struct thin_c *tc = m->tc;
924 struct pool *pool = tc->pool;
925 struct bio *bio = m->bio;
929 cell_error(pool, m->cell);
934 * Commit the prepared block into the mapping btree.
935 * Any I/O for this block arriving after this point will get
936 * remapped to it directly.
938 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
940 metadata_operation_failed(pool, "dm_thin_insert_block", r);
941 cell_error(pool, m->cell);
946 * Release any bios held while the block was being provisioned.
947 * If we are processing a write bio that completely covers the block,
948 * we already processed it, so we can ignore it now when processing
949 * the bios in the cell.
952 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
955 inc_all_io_entry(tc->pool, m->cell->holder);
956 remap_and_issue(tc, m->cell->holder, m->data_block);
957 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
962 mempool_free(m, pool->mapping_pool);
965 /*----------------------------------------------------------------*/
967 static void free_discard_mapping(struct dm_thin_new_mapping *m)
969 struct thin_c *tc = m->tc;
971 cell_defer_no_holder(tc, m->cell);
972 mempool_free(m, tc->pool->mapping_pool);
975 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
977 bio_io_error(m->bio);
978 free_discard_mapping(m);
981 static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
984 free_discard_mapping(m);
987 static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
990 struct thin_c *tc = m->tc;
992 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
994 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
995 bio_io_error(m->bio);
999 cell_defer_no_holder(tc, m->cell);
1000 mempool_free(m, tc->pool->mapping_pool);
1003 /*----------------------------------------------------------------*/
1005 static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
1006 struct bio *discard_parent)
1009 * We've already unmapped this range of blocks, but before we
1010 * passdown we have to check that these blocks are now unused.
1014 struct thin_c *tc = m->tc;
1015 struct pool *pool = tc->pool;
1016 dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
1017 struct discard_op op;
1019 begin_discard(&op, tc, discard_parent);
1021 /* find start of unmapped run */
1022 for (; b < end; b++) {
1023 r = dm_pool_block_is_used(pool->pmd, b, &used);
1034 /* find end of run */
1035 for (e = b + 1; e != end; e++) {
1036 r = dm_pool_block_is_used(pool->pmd, e, &used);
1044 r = issue_discard(&op, b, e);
1051 end_discard(&op, r);
1054 static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
1056 unsigned long flags;
1057 struct pool *pool = m->tc->pool;
1059 spin_lock_irqsave(&pool->lock, flags);
1060 list_add_tail(&m->list, &pool->prepared_discards_pt2);
1061 spin_unlock_irqrestore(&pool->lock, flags);
1065 static void passdown_endio(struct bio *bio)
1068 * It doesn't matter if the passdown discard failed; we still want
1069 * to unmap (we ignore err).
1071 queue_passdown_pt2(bio->bi_private);
1075 static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
1078 struct thin_c *tc = m->tc;
1079 struct pool *pool = tc->pool;
1080 struct bio *discard_parent;
1081 dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
1084 * Only this thread allocates blocks, so we can be sure that the
1085 * newly unmapped blocks will not be allocated before the end of
1088 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
1090 metadata_operation_failed(pool, "dm_thin_remove_range", r);
1091 bio_io_error(m->bio);
1092 cell_defer_no_holder(tc, m->cell);
1093 mempool_free(m, pool->mapping_pool);
1097 discard_parent = bio_alloc(GFP_NOIO, 1);
1098 if (!discard_parent) {
1099 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
1100 dm_device_name(tc->pool->pool_md));
1101 queue_passdown_pt2(m);
1104 discard_parent->bi_end_io = passdown_endio;
1105 discard_parent->bi_private = m;
1107 if (m->maybe_shared)
1108 passdown_double_checking_shared_status(m, discard_parent);
1110 struct discard_op op;
1112 begin_discard(&op, tc, discard_parent);
1113 r = issue_discard(&op, m->data_block, data_end);
1114 end_discard(&op, r);
1119 * Increment the reference counts of the unmapped blocks. This prevents a race between the
1120 * passdown io and reallocation of freed blocks.
1122 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
1124 metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
1125 bio_io_error(m->bio);
1126 cell_defer_no_holder(tc, m->cell);
1127 mempool_free(m, pool->mapping_pool);
1132 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
1135 struct thin_c *tc = m->tc;
1136 struct pool *pool = tc->pool;
1139 * The passdown has completed, so now we can decrement all those
1142 r = dm_pool_dec_data_range(pool->pmd, m->data_block,
1143 m->data_block + (m->virt_end - m->virt_begin));
1145 metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
1146 bio_io_error(m->bio);
1150 cell_defer_no_holder(tc, m->cell);
1151 mempool_free(m, pool->mapping_pool);
1154 static void process_prepared(struct pool *pool, struct list_head *head,
1155 process_mapping_fn *fn)
1157 unsigned long flags;
1158 struct list_head maps;
1159 struct dm_thin_new_mapping *m, *tmp;
1161 INIT_LIST_HEAD(&maps);
1162 spin_lock_irqsave(&pool->lock, flags);
1163 list_splice_init(head, &maps);
1164 spin_unlock_irqrestore(&pool->lock, flags);
1166 list_for_each_entry_safe(m, tmp, &maps, list)
1171 * Deferred bio jobs.
1173 static int io_overlaps_block(struct pool *pool, struct bio *bio)
1175 return bio->bi_iter.bi_size ==
1176 (pool->sectors_per_block << SECTOR_SHIFT);
1179 static int io_overwrites_block(struct pool *pool, struct bio *bio)
1181 return (bio_data_dir(bio) == WRITE) &&
1182 io_overlaps_block(pool, bio);
1185 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
1188 *save = bio->bi_end_io;
1189 bio->bi_end_io = fn;
1192 static int ensure_next_mapping(struct pool *pool)
1194 if (pool->next_mapping)
1197 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
1199 return pool->next_mapping ? 0 : -ENOMEM;
1202 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
1204 struct dm_thin_new_mapping *m = pool->next_mapping;
1206 BUG_ON(!pool->next_mapping);
1208 memset(m, 0, sizeof(struct dm_thin_new_mapping));
1209 INIT_LIST_HEAD(&m->list);
1212 pool->next_mapping = NULL;
1217 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
1218 sector_t begin, sector_t end)
1221 struct dm_io_region to;
1223 to.bdev = tc->pool_dev->bdev;
1225 to.count = end - begin;
1227 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
1229 DMERR_LIMIT("dm_kcopyd_zero() failed");
1230 copy_complete(1, 1, m);
1234 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1235 dm_block_t data_begin,
1236 struct dm_thin_new_mapping *m)
1238 struct pool *pool = tc->pool;
1239 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1241 h->overwrite_mapping = m;
1243 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1244 inc_all_io_entry(pool, bio);
1245 remap_and_issue(tc, bio, data_begin);
1249 * A partial copy also needs to zero the uncopied region.
1251 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1252 struct dm_dev *origin, dm_block_t data_origin,
1253 dm_block_t data_dest,
1254 struct dm_bio_prison_cell *cell, struct bio *bio,
1258 struct pool *pool = tc->pool;
1259 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1262 m->virt_begin = virt_block;
1263 m->virt_end = virt_block + 1u;
1264 m->data_block = data_dest;
1268 * quiesce action + copy action + an extra reference held for the
1269 * duration of this function (we may need to inc later for a
1272 atomic_set(&m->prepare_actions, 3);
1274 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1275 complete_mapping_preparation(m); /* already quiesced */
1278 * IO to pool_dev remaps to the pool target's data_dev.
1280 * If the whole block of data is being overwritten, we can issue the
1281 * bio immediately. Otherwise we use kcopyd to clone the data first.
1283 if (io_overwrites_block(pool, bio))
1284 remap_and_issue_overwrite(tc, bio, data_dest, m);
1286 struct dm_io_region from, to;
1288 from.bdev = origin->bdev;
1289 from.sector = data_origin * pool->sectors_per_block;
1292 to.bdev = tc->pool_dev->bdev;
1293 to.sector = data_dest * pool->sectors_per_block;
1296 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1297 0, copy_complete, m);
1299 DMERR_LIMIT("dm_kcopyd_copy() failed");
1300 copy_complete(1, 1, m);
1303 * We allow the zero to be issued, to simplify the
1304 * error path. Otherwise we'd need to start
1305 * worrying about decrementing the prepare_actions
1311 * Do we need to zero a tail region?
1313 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
1314 atomic_inc(&m->prepare_actions);
1316 data_dest * pool->sectors_per_block + len,
1317 (data_dest + 1) * pool->sectors_per_block);
1321 complete_mapping_preparation(m); /* drop our ref */
1324 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1325 dm_block_t data_origin, dm_block_t data_dest,
1326 struct dm_bio_prison_cell *cell, struct bio *bio)
1328 schedule_copy(tc, virt_block, tc->pool_dev,
1329 data_origin, data_dest, cell, bio,
1330 tc->pool->sectors_per_block);
1333 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1334 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1337 struct pool *pool = tc->pool;
1338 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1340 atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
1342 m->virt_begin = virt_block;
1343 m->virt_end = virt_block + 1u;
1344 m->data_block = data_block;
1348 * If the whole block of data is being overwritten or we are not
1349 * zeroing pre-existing data, we can issue the bio immediately.
1350 * Otherwise we use kcopyd to zero the data first.
1352 if (pool->pf.zero_new_blocks) {
1353 if (io_overwrites_block(pool, bio))
1354 remap_and_issue_overwrite(tc, bio, data_block, m);
1356 ll_zero(tc, m, data_block * pool->sectors_per_block,
1357 (data_block + 1) * pool->sectors_per_block);
1359 process_prepared_mapping(m);
1362 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1363 dm_block_t data_dest,
1364 struct dm_bio_prison_cell *cell, struct bio *bio)
1366 struct pool *pool = tc->pool;
1367 sector_t virt_block_begin = virt_block * pool->sectors_per_block;
1368 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
1370 if (virt_block_end <= tc->origin_size)
1371 schedule_copy(tc, virt_block, tc->origin_dev,
1372 virt_block, data_dest, cell, bio,
1373 pool->sectors_per_block);
1375 else if (virt_block_begin < tc->origin_size)
1376 schedule_copy(tc, virt_block, tc->origin_dev,
1377 virt_block, data_dest, cell, bio,
1378 tc->origin_size - virt_block_begin);
1381 schedule_zero(tc, virt_block, data_dest, cell, bio);
1384 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1386 static void check_for_space(struct pool *pool)
1391 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
1394 r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1399 set_pool_mode(pool, PM_WRITE);
1403 * A non-zero return indicates read_only or fail_io mode.
1404 * Many callers don't care about the return value.
1406 static int commit(struct pool *pool)
1410 if (get_pool_mode(pool) >= PM_READ_ONLY)
1413 r = dm_pool_commit_metadata(pool->pmd);
1415 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1417 check_for_space(pool);
1422 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1424 unsigned long flags;
1426 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1427 DMWARN("%s: reached low water mark for data device: sending event.",
1428 dm_device_name(pool->pool_md));
1429 spin_lock_irqsave(&pool->lock, flags);
1430 pool->low_water_triggered = true;
1431 spin_unlock_irqrestore(&pool->lock, flags);
1432 dm_table_event(pool->ti->table);
1436 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1439 dm_block_t free_blocks;
1440 struct pool *pool = tc->pool;
1442 if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
1445 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1447 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1451 check_low_water_mark(pool, free_blocks);
1455 * Try to commit to see if that will free up some
1462 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1464 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1469 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1474 r = dm_pool_alloc_data_block(pool->pmd, result);
1476 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1484 * If we have run out of space, queue bios until the device is
1485 * resumed, presumably after having been reloaded with more space.
1487 static void retry_on_resume(struct bio *bio)
1489 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1490 struct thin_c *tc = h->tc;
1491 unsigned long flags;
1493 spin_lock_irqsave(&tc->lock, flags);
1494 bio_list_add(&tc->retry_on_resume_list, bio);
1495 spin_unlock_irqrestore(&tc->lock, flags);
1498 static int should_error_unserviceable_bio(struct pool *pool)
1500 enum pool_mode m = get_pool_mode(pool);
1504 /* Shouldn't get here */
1505 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1508 case PM_OUT_OF_DATA_SPACE:
1509 return pool->pf.error_if_no_space ? -ENOSPC : 0;
1515 /* Shouldn't get here */
1516 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1521 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1523 int error = should_error_unserviceable_bio(pool);
1526 bio->bi_error = error;
1529 retry_on_resume(bio);
1532 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1535 struct bio_list bios;
1538 error = should_error_unserviceable_bio(pool);
1540 cell_error_with_code(pool, cell, error);
1544 bio_list_init(&bios);
1545 cell_release(pool, cell, &bios);
1547 while ((bio = bio_list_pop(&bios)))
1548 retry_on_resume(bio);
1551 static void process_discard_cell_no_passdown(struct thin_c *tc,
1552 struct dm_bio_prison_cell *virt_cell)
1554 struct pool *pool = tc->pool;
1555 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1558 * We don't need to lock the data blocks, since there's no
1559 * passdown. We only lock data blocks for allocation and breaking sharing.
1562 m->virt_begin = virt_cell->key.block_begin;
1563 m->virt_end = virt_cell->key.block_end;
1564 m->cell = virt_cell;
1565 m->bio = virt_cell->holder;
1567 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1568 pool->process_prepared_discard(m);
1571 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1574 struct pool *pool = tc->pool;
1578 struct dm_cell_key data_key;
1579 struct dm_bio_prison_cell *data_cell;
1580 struct dm_thin_new_mapping *m;
1581 dm_block_t virt_begin, virt_end, data_begin;
1583 while (begin != end) {
1584 r = ensure_next_mapping(pool);
1586 /* we did our best */
1589 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1590 &data_begin, &maybe_shared);
1593 * Silently fail, letting any mappings we've
1598 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
1599 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1600 /* contention, we'll give up with this range */
1606 * IO may still be going to the destination block. We must
1607 * quiesce before we can do the removal.
1609 m = get_next_mapping(pool);
1611 m->maybe_shared = maybe_shared;
1612 m->virt_begin = virt_begin;
1613 m->virt_end = virt_end;
1614 m->data_block = data_begin;
1615 m->cell = data_cell;
1619 * The parent bio must not complete before sub discard bios are
1620 * chained to it (see end_discard's bio_chain)!
1622 * This per-mapping bi_remaining increment is paired with
1623 * the implicit decrement that occurs via bio_endio() in
1626 bio_inc_remaining(bio);
1627 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1628 pool->process_prepared_discard(m);
1634 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1636 struct bio *bio = virt_cell->holder;
1637 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1640 * The virt_cell will only get freed once the origin bio completes.
1641 * This means it will remain locked while all the individual
1642 * passdown bios are in flight.
1644 h->cell = virt_cell;
1645 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1648 * We complete the bio now, knowing that the bi_remaining field
1649 * will prevent completion until the sub range discards have
1655 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1657 dm_block_t begin, end;
1658 struct dm_cell_key virt_key;
1659 struct dm_bio_prison_cell *virt_cell;
1661 get_bio_block_range(tc, bio, &begin, &end);
1664 * The discard covers less than a block.
1670 build_key(tc->td, VIRTUAL, begin, end, &virt_key);
1671 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1673 * Potential starvation issue: We're relying on the
1674 * fs/application being well behaved, and not trying to
1675 * send IO to a region at the same time as discarding it.
1676 * If they do this persistently then it's possible this
1677 * cell will never be granted.
1681 tc->pool->process_discard_cell(tc, virt_cell);
1684 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1685 struct dm_cell_key *key,
1686 struct dm_thin_lookup_result *lookup_result,
1687 struct dm_bio_prison_cell *cell)
1690 dm_block_t data_block;
1691 struct pool *pool = tc->pool;
1693 r = alloc_data_block(tc, &data_block);
1696 schedule_internal_copy(tc, block, lookup_result->block,
1697 data_block, cell, bio);
1701 retry_bios_on_resume(pool, cell);
1705 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1707 cell_error(pool, cell);
1712 static void __remap_and_issue_shared_cell(void *context,
1713 struct dm_bio_prison_cell *cell)
1715 struct remap_info *info = context;
1718 while ((bio = bio_list_pop(&cell->bios))) {
1719 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1720 bio_op(bio) == REQ_OP_DISCARD)
1721 bio_list_add(&info->defer_bios, bio);
1723 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1725 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1726 inc_all_io_entry(info->tc->pool, bio);
1727 bio_list_add(&info->issue_bios, bio);
1732 static void remap_and_issue_shared_cell(struct thin_c *tc,
1733 struct dm_bio_prison_cell *cell,
1737 struct remap_info info;
1740 bio_list_init(&info.defer_bios);
1741 bio_list_init(&info.issue_bios);
1743 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1746 while ((bio = bio_list_pop(&info.defer_bios)))
1747 thin_defer_bio(tc, bio);
1749 while ((bio = bio_list_pop(&info.issue_bios)))
1750 remap_and_issue(tc, bio, block);
1753 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1755 struct dm_thin_lookup_result *lookup_result,
1756 struct dm_bio_prison_cell *virt_cell)
1758 struct dm_bio_prison_cell *data_cell;
1759 struct pool *pool = tc->pool;
1760 struct dm_cell_key key;
1763 * If cell is already occupied, then sharing is already in the process
1764 * of being broken so we have nothing further to do here.
1766 build_data_key(tc->td, lookup_result->block, &key);
1767 if (bio_detain(pool, &key, bio, &data_cell)) {
1768 cell_defer_no_holder(tc, virt_cell);
1772 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1773 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1774 cell_defer_no_holder(tc, virt_cell);
1776 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1778 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1779 inc_all_io_entry(pool, bio);
1780 remap_and_issue(tc, bio, lookup_result->block);
1782 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1783 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1787 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1788 struct dm_bio_prison_cell *cell)
1791 dm_block_t data_block;
1792 struct pool *pool = tc->pool;
1795 * Remap empty bios (flushes) immediately, without provisioning.
1797 if (!bio->bi_iter.bi_size) {
1798 inc_all_io_entry(pool, bio);
1799 cell_defer_no_holder(tc, cell);
1801 remap_and_issue(tc, bio, 0);
1806 * Fill read bios with zeroes and complete them immediately.
1808 if (bio_data_dir(bio) == READ) {
1810 cell_defer_no_holder(tc, cell);
1815 r = alloc_data_block(tc, &data_block);
1819 schedule_external_copy(tc, block, data_block, cell, bio);
1821 schedule_zero(tc, block, data_block, cell, bio);
1825 retry_bios_on_resume(pool, cell);
1829 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1831 cell_error(pool, cell);
1836 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1839 struct pool *pool = tc->pool;
1840 struct bio *bio = cell->holder;
1841 dm_block_t block = get_bio_block(tc, bio);
1842 struct dm_thin_lookup_result lookup_result;
1844 if (tc->requeue_mode) {
1845 cell_requeue(pool, cell);
1849 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1852 if (lookup_result.shared)
1853 process_shared_bio(tc, bio, block, &lookup_result, cell);
1855 inc_all_io_entry(pool, bio);
1856 remap_and_issue(tc, bio, lookup_result.block);
1857 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1862 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1863 inc_all_io_entry(pool, bio);
1864 cell_defer_no_holder(tc, cell);
1866 if (bio_end_sector(bio) <= tc->origin_size)
1867 remap_to_origin_and_issue(tc, bio);
1869 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1871 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1872 remap_to_origin_and_issue(tc, bio);
1879 provision_block(tc, bio, block, cell);
1883 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1885 cell_defer_no_holder(tc, cell);
1891 static void process_bio(struct thin_c *tc, struct bio *bio)
1893 struct pool *pool = tc->pool;
1894 dm_block_t block = get_bio_block(tc, bio);
1895 struct dm_bio_prison_cell *cell;
1896 struct dm_cell_key key;
1899 * If cell is already occupied, then the block is already
1900 * being provisioned so we have nothing further to do here.
1902 build_virtual_key(tc->td, block, &key);
1903 if (bio_detain(pool, &key, bio, &cell))
1906 process_cell(tc, cell);
1909 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1910 struct dm_bio_prison_cell *cell)
1913 int rw = bio_data_dir(bio);
1914 dm_block_t block = get_bio_block(tc, bio);
1915 struct dm_thin_lookup_result lookup_result;
1917 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1920 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1921 handle_unserviceable_bio(tc->pool, bio);
1923 cell_defer_no_holder(tc, cell);
1925 inc_all_io_entry(tc->pool, bio);
1926 remap_and_issue(tc, bio, lookup_result.block);
1928 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1934 cell_defer_no_holder(tc, cell);
1936 handle_unserviceable_bio(tc->pool, bio);
1940 if (tc->origin_dev) {
1941 inc_all_io_entry(tc->pool, bio);
1942 remap_to_origin_and_issue(tc, bio);
1951 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1954 cell_defer_no_holder(tc, cell);
1960 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1962 __process_bio_read_only(tc, bio, NULL);
1965 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1967 __process_bio_read_only(tc, cell->holder, cell);
1970 static void process_bio_success(struct thin_c *tc, struct bio *bio)
1975 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1980 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1982 cell_success(tc->pool, cell);
1985 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1987 cell_error(tc->pool, cell);
1991 * FIXME: should we also commit due to size of transaction, measured in
1994 static int need_commit_due_to_time(struct pool *pool)
1996 return !time_in_range(jiffies, pool->last_commit_jiffies,
1997 pool->last_commit_jiffies + COMMIT_PERIOD);
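/*
 * With COMMIT_PERIOD defined as HZ above, this says a commit is due once
 * jiffies has moved outside the window
 * [last_commit_jiffies, last_commit_jiffies + HZ], i.e. roughly one second
 * after the previous commit; time_in_range() does the wrap-safe comparison.
 */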
2000 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
2001 #define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
2003 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2005 struct rb_node **rbp, *parent;
2006 struct dm_thin_endio_hook *pbd;
2007 sector_t bi_sector = bio->bi_iter.bi_sector;
2009 rbp = &tc->sort_bio_list.rb_node;
2013 pbd = thin_pbd(parent);
2015 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
2016 rbp = &(*rbp)->rb_left;
2018 rbp = &(*rbp)->rb_right;
2021 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2022 rb_link_node(&pbd->rb_node, parent, rbp);
2023 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2026 static void __extract_sorted_bios(struct thin_c *tc)
2028 struct rb_node *node;
2029 struct dm_thin_endio_hook *pbd;
2032 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2033 pbd = thin_pbd(node);
2034 bio = thin_bio(pbd);
2036 bio_list_add(&tc->deferred_bio_list, bio);
2037 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2040 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2043 static void __sort_thin_deferred_bios(struct thin_c *tc)
2046 struct bio_list bios;
2048 bio_list_init(&bios);
2049 bio_list_merge(&bios, &tc->deferred_bio_list);
2050 bio_list_init(&tc->deferred_bio_list);
2052 /* Sort deferred_bio_list using rb-tree */
2053 while ((bio = bio_list_pop(&bios)))
2054 __thin_bio_rb_add(tc, bio);
2057 * Transfer the sorted bios in sort_bio_list back to
2058 * deferred_bio_list to allow lockless submission of
2061 __extract_sorted_bios(tc);
2064 static void process_thin_deferred_bios(struct thin_c *tc)
2066 struct pool *pool = tc->pool;
2067 unsigned long flags;
2069 struct bio_list bios;
2070 struct blk_plug plug;
2073 if (tc->requeue_mode) {
2074 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
2078 bio_list_init(&bios);
2080 spin_lock_irqsave(&tc->lock, flags);
2082 if (bio_list_empty(&tc->deferred_bio_list)) {
2083 spin_unlock_irqrestore(&tc->lock, flags);
2087 __sort_thin_deferred_bios(tc);
2089 bio_list_merge(&bios, &tc->deferred_bio_list);
2090 bio_list_init(&tc->deferred_bio_list);
2092 spin_unlock_irqrestore(&tc->lock, flags);
2094 blk_start_plug(&plug);
2095 while ((bio = bio_list_pop(&bios))) {
2097 * If we've got no free new_mapping structs, and processing
2098 * this bio might require one, we pause until there are some
2099 * prepared mappings to process.
2101 if (ensure_next_mapping(pool)) {
2102 spin_lock_irqsave(&tc->lock, flags);
2103 bio_list_add(&tc->deferred_bio_list, bio);
2104 bio_list_merge(&tc->deferred_bio_list, &bios);
2105 spin_unlock_irqrestore(&tc->lock, flags);
2109 if (bio_op(bio) == REQ_OP_DISCARD)
2110 pool->process_discard(tc, bio);
2112 pool->process_bio(tc, bio);
2114 if ((count++ & 127) == 0) {
2115 throttle_work_update(&pool->throttle);
2116 dm_pool_issue_prefetches(pool->pmd);
2119 blk_finish_plug(&plug);
2122 static int cmp_cells(const void *lhs, const void *rhs)
2124 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2125 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2127 BUG_ON(!lhs_cell->holder);
2128 BUG_ON(!rhs_cell->holder);
2130 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2133 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2139 static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2142 struct dm_bio_prison_cell *cell, *tmp;
2144 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2145 if (count >= CELL_SORT_ARRAY_SIZE)
2148 pool->cell_sort_array[count++] = cell;
2149 list_del(&cell->user_list);
2152 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2157 static void process_thin_deferred_cells(struct thin_c *tc)
2159 struct pool *pool = tc->pool;
2160 unsigned long flags;
2161 struct list_head cells;
2162 struct dm_bio_prison_cell *cell;
2163 unsigned i, j, count;
2165 INIT_LIST_HEAD(&cells);
2167 spin_lock_irqsave(&tc->lock, flags);
2168 list_splice_init(&tc->deferred_cells, &cells);
2169 spin_unlock_irqrestore(&tc->lock, flags);
2171 if (list_empty(&cells))
2175 count = sort_cells(tc->pool, &cells);
2177 for (i = 0; i < count; i++) {
2178 cell = pool->cell_sort_array[i];
2179 BUG_ON(!cell->holder);
2182 * If we've got no free new_mapping structs, and processing
2183 * this bio might require one, we pause until there are some
2184 * prepared mappings to process.
2186 if (ensure_next_mapping(pool)) {
2187 for (j = i; j < count; j++)
2188 list_add(&pool->cell_sort_array[j]->user_list, &cells);
2190 spin_lock_irqsave(&tc->lock, flags);
2191 list_splice(&cells, &tc->deferred_cells);
2192 spin_unlock_irqrestore(&tc->lock, flags);
2196 if (bio_op(cell->holder) == REQ_OP_DISCARD)
2197 pool->process_discard_cell(tc, cell);
2199 pool->process_cell(tc, cell);
2201 } while (!list_empty(&cells));
2204 static void thin_get(struct thin_c *tc);
2205 static void thin_put(struct thin_c *tc);
2208 * We can't hold rcu_read_lock() around code that can block. So we
2209 * find a thin with the rcu lock held; bump a refcount; then drop
2212 static struct thin_c *get_first_thin(struct pool *pool)
2214 struct thin_c *tc = NULL;
2217 if (!list_empty(&pool->active_thins)) {
2218 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2226 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2228 struct thin_c *old_tc = tc;
2231 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2243 static void process_deferred_bios(struct pool *pool)
2245 unsigned long flags;
2247 struct bio_list bios;
2250 tc = get_first_thin(pool);
2252 process_thin_deferred_cells(tc);
2253 process_thin_deferred_bios(tc);
2254 tc = get_next_thin(pool, tc);
2258 * If there are any deferred flush bios, we must commit
2259 * the metadata before issuing them.
2261 bio_list_init(&bios);
2262 spin_lock_irqsave(&pool->lock, flags);
2263 bio_list_merge(&bios, &pool->deferred_flush_bios);
2264 bio_list_init(&pool->deferred_flush_bios);
2265 spin_unlock_irqrestore(&pool->lock, flags);
2267 if (bio_list_empty(&bios) &&
2268 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2272 while ((bio = bio_list_pop(&bios)))
2276 pool->last_commit_jiffies = jiffies;
2278 while ((bio = bio_list_pop(&bios)))
2279 generic_make_request(bio);
2282 static void do_worker(struct work_struct *ws)
2284 struct pool *pool = container_of(ws, struct pool, worker);
2286 throttle_work_start(&pool->throttle);
2287 dm_pool_issue_prefetches(pool->pmd);
2288 throttle_work_update(&pool->throttle);
2289 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
2290 throttle_work_update(&pool->throttle);
2291 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
2292 throttle_work_update(&pool->throttle);
2293 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
2294 throttle_work_update(&pool->throttle);
2295 process_deferred_bios(pool);
2296 throttle_work_complete(&pool->throttle);
2300 * We want to commit periodically so that not too much
2301 * unwritten data builds up.
2303 static void do_waker(struct work_struct *ws)
2305 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2307 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2310 static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2313 * We're holding onto IO to allow userland time to react. After the
2314 * timeout either the pool will have been resized (and thus back in
2315 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
2317 static void do_no_space_timeout(struct work_struct *ws)
2319 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2322 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2323 pool->pf.error_if_no_space = true;
2324 notify_of_pool_mode_change_to_oods(pool);
2325 error_retry_list_with_code(pool, -ENOSPC);
2329 /*----------------------------------------------------------------*/
2332 struct work_struct worker;
2333 struct completion complete;
2336 static struct pool_work *to_pool_work(struct work_struct *ws)
2338 return container_of(ws, struct pool_work, worker);
2341 static void pool_work_complete(struct pool_work *pw)
2343 complete(&pw->complete);
2346 static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2347 void (*fn)(struct work_struct *))
2349 INIT_WORK_ONSTACK(&pw->worker, fn);
2350 init_completion(&pw->complete);
2351 queue_work(pool->wq, &pw->worker);
2352 wait_for_completion(&pw->complete);
2355 /*----------------------------------------------------------------*/
2357 struct noflush_work {
2358 struct pool_work pw;
2362 static struct noflush_work *to_noflush(struct work_struct *ws)
2364 return container_of(to_pool_work(ws), struct noflush_work, pw);
2367 static void do_noflush_start(struct work_struct *ws)
2369 struct noflush_work *w = to_noflush(ws);
2370 w->tc->requeue_mode = true;
2372 pool_work_complete(&w->pw);
2375 static void do_noflush_stop(struct work_struct *ws)
2377 struct noflush_work *w = to_noflush(ws);
2378 w->tc->requeue_mode = false;
2379 pool_work_complete(&w->pw);
2382 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2384 struct noflush_work w;
2387 pool_work_wait(&w.pw, tc->pool, fn);
2390 /*----------------------------------------------------------------*/
2392 static enum pool_mode get_pool_mode(struct pool *pool)
2394 return pool->pf.mode;
2397 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2399 dm_table_event(pool->ti->table);
2400 DMINFO("%s: switching pool to %s mode",
2401 dm_device_name(pool->pool_md), new_mode);
2404 static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2406 if (!pool->pf.error_if_no_space)
2407 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2409 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2412 static bool passdown_enabled(struct pool_c *pt)
2414 return pt->adjusted_pf.discard_passdown;
2417 static void set_discard_callbacks(struct pool *pool)
2419 struct pool_c *pt = pool->ti->private;
2421 if (passdown_enabled(pt)) {
2422 pool->process_discard_cell = process_discard_cell_passdown;
2423 pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
2424 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
2426 pool->process_discard_cell = process_discard_cell_no_passdown;
2427 pool->process_prepared_discard = process_prepared_discard_no_passdown;
2431 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2433 struct pool_c *pt = pool->ti->private;
2434 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2435 enum pool_mode old_mode = get_pool_mode(pool);
2436 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
2439 * Never allow the pool to transition to PM_WRITE mode if user
2440 * intervention is required to verify metadata and data consistency.
2442 if (new_mode == PM_WRITE && needs_check) {
2443 DMERR("%s: unable to switch pool to write mode until repaired.",
2444 dm_device_name(pool->pool_md));
2445 if (old_mode != new_mode)
2446 new_mode = old_mode;
2448 new_mode = PM_READ_ONLY;
2451 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2452 * not going to recover without a thin_repair. So we never let the
2453 * pool move out of the old mode.
2455 if (old_mode == PM_FAIL)
2456 new_mode = old_mode;
2460 if (old_mode != new_mode)
2461 notify_of_pool_mode_change(pool, "failure");
2462 dm_pool_metadata_read_only(pool->pmd);
2463 pool->process_bio = process_bio_fail;
2464 pool->process_discard = process_bio_fail;
2465 pool->process_cell = process_cell_fail;
2466 pool->process_discard_cell = process_cell_fail;
2467 pool->process_prepared_mapping = process_prepared_mapping_fail;
2468 pool->process_prepared_discard = process_prepared_discard_fail;
2470 error_retry_list(pool);
2474 if (old_mode != new_mode)
2475 notify_of_pool_mode_change(pool, "read-only");
2476 dm_pool_metadata_read_only(pool->pmd);
2477 pool->process_bio = process_bio_read_only;
2478 pool->process_discard = process_bio_success;
2479 pool->process_cell = process_cell_read_only;
2480 pool->process_discard_cell = process_cell_success;
2481 pool->process_prepared_mapping = process_prepared_mapping_fail;
2482 pool->process_prepared_discard = process_prepared_discard_success;
2484 error_retry_list(pool);
2487 case PM_OUT_OF_DATA_SPACE:
2489 * Ideally we'd never hit this state; the low water mark
2490 * would trigger userland to extend the pool before we
2491 * completely run out of data space. However, many small
2492 * IOs to unprovisioned space can consume data space at an
2493 * alarming rate. Adjust your low water mark if you're
2494 * frequently seeing this mode.
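/*
 * Worked example (illustrative): with a 64KiB data block size
 * (128 sectors) and a low water mark of 32768 blocks, the dm event
 * fires once fewer than 32768 * 64KiB = 2GiB of unprovisioned data
 * space remains, giving userspace that much headroom to grow the
 * data device before the pool enters this mode.
 */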
2496 if (old_mode != new_mode)
2497 notify_of_pool_mode_change_to_oods(pool);
2498 pool->out_of_data_space = true;
2499 pool->process_bio = process_bio_read_only;
2500 pool->process_discard = process_discard_bio;
2501 pool->process_cell = process_cell_read_only;
2502 pool->process_prepared_mapping = process_prepared_mapping;
2503 set_discard_callbacks(pool);
2505 if (!pool->pf.error_if_no_space && no_space_timeout)
2506 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2510 if (old_mode != new_mode)
2511 notify_of_pool_mode_change(pool, "write");
2512 pool->out_of_data_space = false;
2513 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2514 dm_pool_metadata_read_write(pool->pmd);
2515 pool->process_bio = process_bio;
2516 pool->process_discard = process_discard_bio;
2517 pool->process_cell = process_cell;
2518 pool->process_prepared_mapping = process_prepared_mapping;
2519 set_discard_callbacks(pool);
2523 pool->pf.mode = new_mode;
2525 * The pool mode may have changed, sync it so bind_control_target()
2526 * doesn't cause an unexpected mode transition on resume.
2528 pt->adjusted_pf.mode = new_mode;
2531 static void abort_transaction(struct pool *pool)
2533 const char *dev_name = dm_device_name(pool->pool_md);
2535 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2536 if (dm_pool_abort_metadata(pool->pmd)) {
2537 DMERR("%s: failed to abort metadata transaction", dev_name);
2538 set_pool_mode(pool, PM_FAIL);
2541 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2542 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2543 set_pool_mode(pool, PM_FAIL);
2547 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2549 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2550 dm_device_name(pool->pool_md), op, r);
2552 abort_transaction(pool);
2553 set_pool_mode(pool, PM_READ_ONLY);
2556 /*----------------------------------------------------------------*/
2559 * Mapping functions.
2563 * Called only while mapping a thin bio to hand it over to the workqueue.
2565 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2567 unsigned long flags;
2568 struct pool *pool = tc->pool;
2570 spin_lock_irqsave(&tc->lock, flags);
2571 bio_list_add(&tc->deferred_bio_list, bio);
2572 spin_unlock_irqrestore(&tc->lock, flags);
2577 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2579 struct pool *pool = tc->pool;
2581 throttle_lock(&pool->throttle);
2582 thin_defer_bio(tc, bio);
2583 throttle_unlock(&pool->throttle);
2586 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2588 unsigned long flags;
2589 struct pool *pool = tc->pool;
2591 throttle_lock(&pool->throttle);
2592 spin_lock_irqsave(&tc->lock, flags);
2593 list_add_tail(&cell->user_list, &tc->deferred_cells);
2594 spin_unlock_irqrestore(&tc->lock, flags);
2595 throttle_unlock(&pool->throttle);
2600 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2602 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2605 h->shared_read_entry = NULL;
2606 h->all_io_entry = NULL;
2607 h->overwrite_mapping = NULL;
2612 * Non-blocking function called from the thin target's map function.
2614 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2617 struct thin_c *tc = ti->private;
2618 dm_block_t block = get_bio_block(tc, bio);
2619 struct dm_thin_device *td = tc->td;
2620 struct dm_thin_lookup_result result;
2621 struct dm_bio_prison_cell *virt_cell, *data_cell;
2622 struct dm_cell_key key;
2624 thin_hook_bio(tc, bio);
2626 if (tc->requeue_mode) {
2627 bio->bi_error = DM_ENDIO_REQUEUE;
2629 return DM_MAPIO_SUBMITTED;
2632 if (get_pool_mode(tc->pool) == PM_FAIL) {
2634 return DM_MAPIO_SUBMITTED;
2637 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2638 thin_defer_bio_with_throttle(tc, bio);
2639 return DM_MAPIO_SUBMITTED;
2643 * We must hold the virtual cell before doing the lookup, otherwise
2644 * there's a race with discard.
2646 build_virtual_key(tc->td, block, &key);
2647 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2648 return DM_MAPIO_SUBMITTED;
2650 r = dm_thin_find_block(td, block, 0, &result);
2653 * Note that we defer readahead too.
2657 if (unlikely(result.shared)) {
2659 * We have a race condition here between the
2660 * result.shared value returned by the lookup and
2661 * snapshot creation, which may cause new sharing.
2664 * To avoid this always quiesce the origin before
2665 * taking the snap. You want to do this anyway to
2666 * ensure a consistent application view (i.e. lockfs).
2669 * More distant ancestors are irrelevant. The
2670 * shared flag will be set in their case.
2672 thin_defer_cell(tc, virt_cell);
2673 return DM_MAPIO_SUBMITTED;
2676 build_data_key(tc->td, result.block, &key);
2677 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2678 cell_defer_no_holder(tc, virt_cell);
2679 return DM_MAPIO_SUBMITTED;
2682 inc_all_io_entry(tc->pool, bio);
2683 cell_defer_no_holder(tc, data_cell);
2684 cell_defer_no_holder(tc, virt_cell);
2686 remap(tc, bio, result.block);
2687 return DM_MAPIO_REMAPPED;
2691 thin_defer_cell(tc, virt_cell);
2692 return DM_MAPIO_SUBMITTED;
2696 * Must always call bio_io_error on failure.
2697 * dm_thin_find_block can fail with -EINVAL if the
2698 * pool is switched to fail-io mode.
2701 cell_defer_no_holder(tc, virt_cell);
2702 return DM_MAPIO_SUBMITTED;
2706 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2708 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2709 struct request_queue *q;
2711 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2714 q = bdev_get_queue(pt->data_dev->bdev);
2715 return bdi_congested(q->backing_dev_info, bdi_bits);
2718 static void requeue_bios(struct pool *pool)
2720 unsigned long flags;
2724 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2725 spin_lock_irqsave(&tc->lock, flags);
2726 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2727 bio_list_init(&tc->retry_on_resume_list);
2728 spin_unlock_irqrestore(&tc->lock, flags);
2733 /*----------------------------------------------------------------
2734 * Binding of control targets to a pool object
2735 *--------------------------------------------------------------*/
2736 static bool data_dev_supports_discard(struct pool_c *pt)
2738 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2740 return q && blk_queue_discard(q);
2743 static bool is_factor(sector_t block_size, uint32_t n)
2745 return !sector_div(block_size, n);
2749 * If discard_passdown was enabled verify that the data device
2750 * supports discards. Disable discard_passdown if not.
2752 static void disable_passdown_if_not_supported(struct pool_c *pt)
2754 struct pool *pool = pt->pool;
2755 struct block_device *data_bdev = pt->data_dev->bdev;
2756 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2757 const char *reason = NULL;
2758 char buf[BDEVNAME_SIZE];
2760 if (!pt->adjusted_pf.discard_passdown)
2763 if (!data_dev_supports_discard(pt))
2764 reason = "discard unsupported";
2766 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2767 reason = "max discard sectors smaller than a block";
2770 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2771 pt->adjusted_pf.discard_passdown = false;
2775 static int bind_control_target(struct pool *pool, struct dm_target *ti)
2777 struct pool_c *pt = ti->private;
2780 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2782 enum pool_mode old_mode = get_pool_mode(pool);
2783 enum pool_mode new_mode = pt->adjusted_pf.mode;
2786 * Don't change the pool's mode until set_pool_mode() below.
2787 * Otherwise the pool's process_* function pointers may
2788 * not match the desired pool mode.
2790 pt->adjusted_pf.mode = old_mode;
2793 pool->pf = pt->adjusted_pf;
2794 pool->low_water_blocks = pt->low_water_blocks;
2796 set_pool_mode(pool, new_mode);
2801 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2807 /*----------------------------------------------------------------
2809 *--------------------------------------------------------------*/
2810 /* Initialize pool features. */
2811 static void pool_features_init(struct pool_features *pf)
2813 pf->mode = PM_WRITE;
2814 pf->zero_new_blocks = true;
2815 pf->discard_enabled = true;
2816 pf->discard_passdown = true;
2817 pf->error_if_no_space = false;
2820 static void __pool_destroy(struct pool *pool)
2822 __pool_table_remove(pool);
2824 vfree(pool->cell_sort_array);
2825 if (dm_pool_metadata_close(pool->pmd) < 0)
2826 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2828 dm_bio_prison_destroy(pool->prison);
2829 dm_kcopyd_client_destroy(pool->copier);
2832 destroy_workqueue(pool->wq);
2834 if (pool->next_mapping)
2835 mempool_free(pool->next_mapping, pool->mapping_pool);
2836 mempool_destroy(pool->mapping_pool);
2837 dm_deferred_set_destroy(pool->shared_read_ds);
2838 dm_deferred_set_destroy(pool->all_io_ds);
2842 static struct kmem_cache *_new_mapping_cache;
2844 static struct pool *pool_create(struct mapped_device *pool_md,
2845 struct block_device *metadata_dev,
2846 unsigned long block_size,
2847 int read_only, char **error)
2852 struct dm_pool_metadata *pmd;
2853 bool format_device = !read_only;
2855 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2857 *error = "Error creating metadata object";
2858 return (struct pool *)pmd;
2861 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2863 *error = "Error allocating memory for pool";
2864 err_p = ERR_PTR(-ENOMEM);
2869 pool->sectors_per_block = block_size;
2870 if (block_size & (block_size - 1))
2871 pool->sectors_per_block_shift = -1;
2873 pool->sectors_per_block_shift = __ffs(block_size);
2874 pool->low_water_blocks = 0;
2875 pool_features_init(&pool->pf);
2876 pool->prison = dm_bio_prison_create();
2877 if (!pool->prison) {
2878 *error = "Error creating pool's bio prison";
2879 err_p = ERR_PTR(-ENOMEM);
2883 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2884 if (IS_ERR(pool->copier)) {
2885 r = PTR_ERR(pool->copier);
2886 *error = "Error creating pool's kcopyd client";
2888 goto bad_kcopyd_client;
2892 * Create a single-threaded workqueue that will service all devices
2893 * that use this metadata.
2895 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2897 *error = "Error creating pool's workqueue";
2898 err_p = ERR_PTR(-ENOMEM);
2902 throttle_init(&pool->throttle);
2903 INIT_WORK(&pool->worker, do_worker);
2904 INIT_DELAYED_WORK(&pool->waker, do_waker);
2905 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2906 spin_lock_init(&pool->lock);
2907 bio_list_init(&pool->deferred_flush_bios);
2908 INIT_LIST_HEAD(&pool->prepared_mappings);
2909 INIT_LIST_HEAD(&pool->prepared_discards);
2910 INIT_LIST_HEAD(&pool->prepared_discards_pt2);
2911 INIT_LIST_HEAD(&pool->active_thins);
2912 pool->low_water_triggered = false;
2913 pool->suspended = true;
2914 pool->out_of_data_space = false;
2916 pool->shared_read_ds = dm_deferred_set_create();
2917 if (!pool->shared_read_ds) {
2918 *error = "Error creating pool's shared read deferred set";
2919 err_p = ERR_PTR(-ENOMEM);
2920 goto bad_shared_read_ds;
2923 pool->all_io_ds = dm_deferred_set_create();
2924 if (!pool->all_io_ds) {
2925 *error = "Error creating pool's all io deferred set";
2926 err_p = ERR_PTR(-ENOMEM);
2930 pool->next_mapping = NULL;
2931 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2932 _new_mapping_cache);
2933 if (!pool->mapping_pool) {
2934 *error = "Error creating pool's mapping mempool";
2935 err_p = ERR_PTR(-ENOMEM);
2936 goto bad_mapping_pool;
2939 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2940 if (!pool->cell_sort_array) {
2941 *error = "Error allocating cell sort array";
2942 err_p = ERR_PTR(-ENOMEM);
2943 goto bad_sort_array;
2946 pool->ref_count = 1;
2947 pool->last_commit_jiffies = jiffies;
2948 pool->pool_md = pool_md;
2949 pool->md_dev = metadata_dev;
2950 __pool_table_insert(pool);
2955 mempool_destroy(pool->mapping_pool);
2957 dm_deferred_set_destroy(pool->all_io_ds);
2959 dm_deferred_set_destroy(pool->shared_read_ds);
2961 destroy_workqueue(pool->wq);
2963 dm_kcopyd_client_destroy(pool->copier);
2965 dm_bio_prison_destroy(pool->prison);
2969 if (dm_pool_metadata_close(pmd))
2970 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2975 static void __pool_inc(struct pool *pool)
2977 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2981 static void __pool_dec(struct pool *pool)
2983 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2984 BUG_ON(!pool->ref_count);
2985 if (!--pool->ref_count)
2986 __pool_destroy(pool);
2989 static struct pool *__pool_find(struct mapped_device *pool_md,
2990 struct block_device *metadata_dev,
2991 unsigned long block_size, int read_only,
2992 char **error, int *created)
2994 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2997 if (pool->pool_md != pool_md) {
2998 *error = "metadata device already in use by a pool";
2999 return ERR_PTR(-EBUSY);
3004 pool = __pool_table_lookup(pool_md);
3006 if (pool->md_dev != metadata_dev) {
3007 *error = "different pool cannot replace a pool";
3008 return ERR_PTR(-EINVAL);
3013 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
3021 /*----------------------------------------------------------------
3022 * Pool target methods
3023 *--------------------------------------------------------------*/
3024 static void pool_dtr(struct dm_target *ti)
3026 struct pool_c *pt = ti->private;
3028 mutex_lock(&dm_thin_pool_table.mutex);
3030 unbind_control_target(pt->pool, ti);
3031 __pool_dec(pt->pool);
3032 dm_put_device(ti, pt->metadata_dev);
3033 dm_put_device(ti, pt->data_dev);
3036 mutex_unlock(&dm_thin_pool_table.mutex);
3039 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
3040 struct dm_target *ti)
3044 const char *arg_name;
3046 static struct dm_arg _args[] = {
3047 {0, 4, "Invalid number of pool feature arguments"},
3051 * No feature arguments supplied.
3056 r = dm_read_arg_group(_args, as, &argc, &ti->error);
3060 while (argc && !r) {
3061 arg_name = dm_shift_arg(as);
3064 if (!strcasecmp(arg_name, "skip_block_zeroing"))
3065 pf->zero_new_blocks = false;
3067 else if (!strcasecmp(arg_name, "ignore_discard"))
3068 pf->discard_enabled = false;
3070 else if (!strcasecmp(arg_name, "no_discard_passdown"))
3071 pf->discard_passdown = false;
3073 else if (!strcasecmp(arg_name, "read_only"))
3074 pf->mode = PM_READ_ONLY;
3076 else if (!strcasecmp(arg_name, "error_if_no_space"))
3077 pf->error_if_no_space = true;
3080 ti->error = "Unrecognised pool feature requested";
3089 static void metadata_low_callback(void *context)
3091 struct pool *pool = context;
3093 DMWARN("%s: reached low water mark for metadata device: sending event.",
3094 dm_device_name(pool->pool_md));
3096 dm_table_event(pool->ti->table);
3099 static sector_t get_dev_size(struct block_device *bdev)
3101 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3104 static void warn_if_metadata_device_too_big(struct block_device *bdev)
3106 sector_t metadata_dev_size = get_dev_size(bdev);
3107 char buffer[BDEVNAME_SIZE];
3109 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
3110 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3111 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
3114 static sector_t get_metadata_dev_size(struct block_device *bdev)
3116 sector_t metadata_dev_size = get_dev_size(bdev);
3118 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3119 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
3121 return metadata_dev_size;
3124 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3126 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3128 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
3130 return metadata_dev_size;
3134 * When a metadata threshold is crossed a dm event is triggered, and
3135 * userland should respond by growing the metadata device. We could let
3136 * userland set the threshold, like we do with the data threshold, but I'm
3137 * not sure they know enough to do this well.
3139 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3142 * 4M is ample for all ops with the possible exception of thin
3143 * device deletion which is harmless if it fails (just retry the
3144 * delete after you've grown the device).
3146 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3147 return min((dm_block_t)1024ULL /* 4M */, quarter);
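/*
 * Worked example (illustrative): metadata blocks are 4KiB, so the
 * 1024-block cap above corresponds to 4MiB.  A 1GiB metadata device
 * holds 262144 such blocks; a quarter of that is 65536, so the
 * threshold is min(1024, 65536) = 1024 blocks, i.e. the event fires
 * when less than 4MiB of metadata space remains.  Only a metadata
 * device smaller than 16MiB yields a threshold below the cap.
 */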
3151 * thin-pool <metadata dev> <data dev>
3152 * <data block size (sectors)>
3153 * <low water mark (blocks)>
3154 * [<#feature args> [<arg>]*]
3156 * Optional feature arguments are:
3157 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
3158 * ignore_discard: disable discard
3159 * no_discard_passdown: don't pass discards down to the data device
3160 * read_only: Don't allow any changes to be made to the pool metadata.
3161 * error_if_no_space: error IOs, instead of queueing, if no space.
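/*
 * Example (illustrative values): a pool built on /dev/sdb1 (metadata)
 * and /dev/sdb2 (data), with a 64KiB block size (128 sectors), a low
 * water mark of 32768 blocks and one feature argument, could be
 * created with a table line such as:
 *
 *   dmsetup create pool \
 *     --table "0 419430400 thin-pool /dev/sdb1 /dev/sdb2 128 32768 1 skip_block_zeroing"
 *
 * where 419430400 is the target length in 512-byte sectors (200GiB
 * here) and must not exceed the size of the data device.
 */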
3163 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3165 int r, pool_created = 0;
3168 struct pool_features pf;
3169 struct dm_arg_set as;
3170 struct dm_dev *data_dev;
3171 unsigned long block_size;
3172 dm_block_t low_water_blocks;
3173 struct dm_dev *metadata_dev;
3174 fmode_t metadata_mode;
3177 * FIXME Remove validation from scope of lock.
3179 mutex_lock(&dm_thin_pool_table.mutex);
3182 ti->error = "Invalid argument count";
3191 * Set default pool features.
3193 pool_features_init(&pf);
3195 dm_consume_args(&as, 4);
3196 r = parse_pool_features(&as, &pf, ti);
3200 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3201 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
3203 ti->error = "Error opening metadata block device";
3206 warn_if_metadata_device_too_big(metadata_dev->bdev);
3208 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3210 ti->error = "Error getting data device";
3214 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3215 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3216 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
3217 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
3218 ti->error = "Invalid block size";
3223 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3224 ti->error = "Invalid low water mark";
3229 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3235 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
3236 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
3243 * 'pool_created' reflects whether this is the first table load.
3244 * Top level discard support is not allowed to be changed after
3245 * initial load. This would require a pool reload to trigger thin device changes.
3248 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3249 ti->error = "Discard support cannot be disabled once enabled";
3251 goto out_flags_changed;
3256 pt->metadata_dev = metadata_dev;
3257 pt->data_dev = data_dev;
3258 pt->low_water_blocks = low_water_blocks;
3259 pt->adjusted_pf = pt->requested_pf = pf;
3260 ti->num_flush_bios = 1;
3263 * Only need to enable discards if the pool should pass
3264 * them down to the data device. The thin device's discard
3265 * processing will cause mappings to be removed from the btree.
3267 if (pf.discard_enabled && pf.discard_passdown) {
3268 ti->num_discard_bios = 1;
3271 * Setting 'discards_supported' circumvents the normal
3272 * stacking of discard limits (this keeps the pool and
3273 * thin devices' discard limits consistent).
3275 ti->discards_supported = true;
3279 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3280 calc_metadata_threshold(pt),
3281 metadata_low_callback,
3284 goto out_flags_changed;
3286 pt->callbacks.congested_fn = pool_is_congested;
3287 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3289 mutex_unlock(&dm_thin_pool_table.mutex);
3298 dm_put_device(ti, data_dev);
3300 dm_put_device(ti, metadata_dev);
3302 mutex_unlock(&dm_thin_pool_table.mutex);
3307 static int pool_map(struct dm_target *ti, struct bio *bio)
3310 struct pool_c *pt = ti->private;
3311 struct pool *pool = pt->pool;
3312 unsigned long flags;
3315 * As this is a singleton target, ti->begin is always zero.
3317 spin_lock_irqsave(&pool->lock, flags);
3318 bio->bi_bdev = pt->data_dev->bdev;
3319 r = DM_MAPIO_REMAPPED;
3320 spin_unlock_irqrestore(&pool->lock, flags);
3325 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3328 struct pool_c *pt = ti->private;
3329 struct pool *pool = pt->pool;
3330 sector_t data_size = ti->len;
3331 dm_block_t sb_data_size;
3333 *need_commit = false;
3335 (void) sector_div(data_size, pool->sectors_per_block);
3337 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3339 DMERR("%s: failed to retrieve data device size",
3340 dm_device_name(pool->pool_md));
3344 if (data_size < sb_data_size) {
3345 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3346 dm_device_name(pool->pool_md),
3347 (unsigned long long)data_size, sb_data_size);
3350 } else if (data_size > sb_data_size) {
3351 if (dm_pool_metadata_needs_check(pool->pmd)) {
3352 DMERR("%s: unable to grow the data device until repaired.",
3353 dm_device_name(pool->pool_md));
3358 DMINFO("%s: growing the data device from %llu to %llu blocks",
3359 dm_device_name(pool->pool_md),
3360 sb_data_size, (unsigned long long)data_size);
3361 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3363 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3367 *need_commit = true;
3373 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3376 struct pool_c *pt = ti->private;
3377 struct pool *pool = pt->pool;
3378 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3380 *need_commit = false;
3382 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3384 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3386 DMERR("%s: failed to retrieve metadata device size",
3387 dm_device_name(pool->pool_md));
3391 if (metadata_dev_size < sb_metadata_dev_size) {
3392 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3393 dm_device_name(pool->pool_md),
3394 metadata_dev_size, sb_metadata_dev_size);
3397 } else if (metadata_dev_size > sb_metadata_dev_size) {
3398 if (dm_pool_metadata_needs_check(pool->pmd)) {
3399 DMERR("%s: unable to grow the metadata device until repaired.",
3400 dm_device_name(pool->pool_md));
3404 warn_if_metadata_device_too_big(pool->md_dev);
3405 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3406 dm_device_name(pool->pool_md),
3407 sb_metadata_dev_size, metadata_dev_size);
3408 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3410 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3414 *need_commit = true;
3421 * Retrieves the number of blocks of the data device from
3422 * the superblock and compares it to the actual device size,
3423 * thus resizing the data device in case it has grown.
3425 * This both copes with opening preallocated data devices in the ctr
3426 * being followed by a resume, and with
3428 * calling the resume method individually after userspace has
3429 * grown the data device in reaction to a table event.
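/*
 * Example (illustrative; device names are assumptions): after a
 * low-water event, userspace typically grows the underlying data
 * device and then reloads and resumes the pool so this resize logic
 * runs, e.g.:
 *
 *   # grow the data device (extend the LV, resize the LUN, ...)
 *   dmsetup suspend pool
 *   dmsetup reload pool --table "<same table with the larger length>"
 *   dmsetup resume pool
 */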
3431 static int pool_preresume(struct dm_target *ti)
3434 bool need_commit1, need_commit2;
3435 struct pool_c *pt = ti->private;
3436 struct pool *pool = pt->pool;
3439 * Take control of the pool object.
3441 r = bind_control_target(pool, ti);
3445 r = maybe_resize_data_dev(ti, &need_commit1);
3449 r = maybe_resize_metadata_dev(ti, &need_commit2);
3453 if (need_commit1 || need_commit2)
3454 (void) commit(pool);
3459 static void pool_suspend_active_thins(struct pool *pool)
3463 /* Suspend all active thin devices */
3464 tc = get_first_thin(pool);
3466 dm_internal_suspend_noflush(tc->thin_md);
3467 tc = get_next_thin(pool, tc);
3471 static void pool_resume_active_thins(struct pool *pool)
3475 /* Resume all active thin devices */
3476 tc = get_first_thin(pool);
3478 dm_internal_resume(tc->thin_md);
3479 tc = get_next_thin(pool, tc);
3483 static void pool_resume(struct dm_target *ti)
3485 struct pool_c *pt = ti->private;
3486 struct pool *pool = pt->pool;
3487 unsigned long flags;
3490 * Must requeue active_thins' bios and then resume
3491 * active_thins _before_ clearing 'suspend' flag.
3494 pool_resume_active_thins(pool);
3496 spin_lock_irqsave(&pool->lock, flags);
3497 pool->low_water_triggered = false;
3498 pool->suspended = false;
3499 spin_unlock_irqrestore(&pool->lock, flags);
3501 do_waker(&pool->waker.work);
3504 static void pool_presuspend(struct dm_target *ti)
3506 struct pool_c *pt = ti->private;
3507 struct pool *pool = pt->pool;
3508 unsigned long flags;
3510 spin_lock_irqsave(&pool->lock, flags);
3511 pool->suspended = true;
3512 spin_unlock_irqrestore(&pool->lock, flags);
3514 pool_suspend_active_thins(pool);
3517 static void pool_presuspend_undo(struct dm_target *ti)
3519 struct pool_c *pt = ti->private;
3520 struct pool *pool = pt->pool;
3521 unsigned long flags;
3523 pool_resume_active_thins(pool);
3525 spin_lock_irqsave(&pool->lock, flags);
3526 pool->suspended = false;
3527 spin_unlock_irqrestore(&pool->lock, flags);
3530 static void pool_postsuspend(struct dm_target *ti)
3532 struct pool_c *pt = ti->private;
3533 struct pool *pool = pt->pool;
3535 cancel_delayed_work_sync(&pool->waker);
3536 cancel_delayed_work_sync(&pool->no_space_timeout);
3537 flush_workqueue(pool->wq);
3538 (void) commit(pool);
3541 static int check_arg_count(unsigned argc, unsigned args_required)
3543 if (argc != args_required) {
3544 DMWARN("Message received with %u arguments instead of %u.",
3545 argc, args_required);
3552 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3554 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3555 *dev_id <= MAX_DEV_ID)
3559 DMWARN("Message received with invalid device id: %s", arg);
3564 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3569 r = check_arg_count(argc, 2);
3573 r = read_dev_id(argv[1], &dev_id, 1);
3577 r = dm_pool_create_thin(pool->pmd, dev_id);
3579 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3587 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3590 dm_thin_id origin_dev_id;
3593 r = check_arg_count(argc, 3);
3597 r = read_dev_id(argv[1], &dev_id, 1);
3601 r = read_dev_id(argv[2], &origin_dev_id, 1);
3605 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3607 DMWARN("Creation of new snapshot %s of device %s failed.",
3615 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3620 r = check_arg_count(argc, 2);
3624 r = read_dev_id(argv[1], &dev_id, 1);
3628 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3630 DMWARN("Deletion of thin device %s failed.", argv[1]);
3635 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3637 dm_thin_id old_id, new_id;
3640 r = check_arg_count(argc, 3);
3644 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3645 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3649 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3650 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3654 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3656 DMWARN("Failed to change transaction id from %s to %s.",
3664 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3668 r = check_arg_count(argc, 1);
3672 (void) commit(pool);
3674 r = dm_pool_reserve_metadata_snap(pool->pmd);
3676 DMWARN("reserve_metadata_snap message failed.");
3681 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3685 r = check_arg_count(argc, 1);
3689 r = dm_pool_release_metadata_snap(pool->pmd);
3691 DMWARN("release_metadata_snap message failed.");
3697 * Messages supported:
3698 * create_thin <dev_id>
3699 * create_snap <dev_id> <origin_id>
 * delete <dev_id>
3701 * set_transaction_id <current_trans_id> <new_trans_id>
3702 * reserve_metadata_snap
3703 * release_metadata_snap
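/*
 * Example usage (illustrative), assuming the pool device is
 * /dev/mapper/pool:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 *
 * The sector argument (0) addresses the single pool target.  Messages
 * are rejected while the pool is in READ_ONLY or FAIL mode.
 */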
3705 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3708 struct pool_c *pt = ti->private;
3709 struct pool *pool = pt->pool;
3711 if (get_pool_mode(pool) >= PM_READ_ONLY) {
3712 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3713 dm_device_name(pool->pool_md));
3717 if (!strcasecmp(argv[0], "create_thin"))
3718 r = process_create_thin_mesg(argc, argv, pool);
3720 else if (!strcasecmp(argv[0], "create_snap"))
3721 r = process_create_snap_mesg(argc, argv, pool);
3723 else if (!strcasecmp(argv[0], "delete"))
3724 r = process_delete_mesg(argc, argv, pool);
3726 else if (!strcasecmp(argv[0], "set_transaction_id"))
3727 r = process_set_transaction_id_mesg(argc, argv, pool);
3729 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3730 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3732 else if (!strcasecmp(argv[0], "release_metadata_snap"))
3733 r = process_release_metadata_snap_mesg(argc, argv, pool);
3736 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3739 (void) commit(pool);
3744 static void emit_flags(struct pool_features *pf, char *result,
3745 unsigned sz, unsigned maxlen)
3747 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
3748 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3749 pf->error_if_no_space;
3750 DMEMIT("%u ", count);
3752 if (!pf->zero_new_blocks)
3753 DMEMIT("skip_block_zeroing ");
3755 if (!pf->discard_enabled)
3756 DMEMIT("ignore_discard ");
3758 if (!pf->discard_passdown)
3759 DMEMIT("no_discard_passdown ");
3761 if (pf->mode == PM_READ_ONLY)
3762 DMEMIT("read_only ");
3764 if (pf->error_if_no_space)
3765 DMEMIT("error_if_no_space ");
3770 * <transaction id> <used metadata blocks>/<total metadata blocks>
3771 * <used data blocks>/<total data blocks> <held metadata root>
3772 * <pool mode> <discard config> <no space config> <needs_check>
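/*
 * Example output (illustrative values):
 *
 *   # dmsetup status pool
 *   0 419430400 thin-pool 1 167/4096 112/3276800 - rw discard_passdown queue_if_no_space -
 *
 * i.e. transaction id 1, 167 of 4096 metadata blocks and 112 of
 * 3276800 data blocks used, no held metadata root, pool writable,
 * discards passed down, IO queued when space runs out, and the
 * needs_check flag not set.
 */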
3774 static void pool_status(struct dm_target *ti, status_type_t type,
3775 unsigned status_flags, char *result, unsigned maxlen)
3779 uint64_t transaction_id;
3780 dm_block_t nr_free_blocks_data;
3781 dm_block_t nr_free_blocks_metadata;
3782 dm_block_t nr_blocks_data;
3783 dm_block_t nr_blocks_metadata;
3784 dm_block_t held_root;
3785 char buf[BDEVNAME_SIZE];
3786 char buf2[BDEVNAME_SIZE];
3787 struct pool_c *pt = ti->private;
3788 struct pool *pool = pt->pool;
3791 case STATUSTYPE_INFO:
3792 if (get_pool_mode(pool) == PM_FAIL) {
3797 /* Commit to ensure statistics aren't out-of-date */
3798 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3799 (void) commit(pool);
3801 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3803 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3804 dm_device_name(pool->pool_md), r);
3808 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3810 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3811 dm_device_name(pool->pool_md), r);
3815 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
3817 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3818 dm_device_name(pool->pool_md), r);
3822 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3824 DMERR("%s: dm_pool_get_free_block_count returned %d",
3825 dm_device_name(pool->pool_md), r);
3829 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
3831 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3832 dm_device_name(pool->pool_md), r);
3836 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
3838 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3839 dm_device_name(pool->pool_md), r);
3843 DMEMIT("%llu %llu/%llu %llu/%llu ",
3844 (unsigned long long)transaction_id,
3845 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3846 (unsigned long long)nr_blocks_metadata,
3847 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3848 (unsigned long long)nr_blocks_data);
3851 DMEMIT("%llu ", held_root);
3855 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3856 DMEMIT("out_of_data_space ");
3857 else if (pool->pf.mode == PM_READ_ONLY)
3862 if (!pool->pf.discard_enabled)
3863 DMEMIT("ignore_discard ");
3864 else if (pool->pf.discard_passdown)
3865 DMEMIT("discard_passdown ");
3867 DMEMIT("no_discard_passdown ");
3869 if (pool->pf.error_if_no_space)
3870 DMEMIT("error_if_no_space ");
3872 DMEMIT("queue_if_no_space ");
3874 if (dm_pool_metadata_needs_check(pool->pmd))
3875 DMEMIT("needs_check ");
3881 case STATUSTYPE_TABLE:
3882 DMEMIT("%s %s %lu %llu ",
3883 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3884 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3885 (unsigned long)pool->sectors_per_block,
3886 (unsigned long long)pt->low_water_blocks);
3887 emit_flags(&pt->requested_pf, result, sz, maxlen);
3896 static int pool_iterate_devices(struct dm_target *ti,
3897 iterate_devices_callout_fn fn, void *data)
3899 struct pool_c *pt = ti->private;
3901 return fn(ti, pt->data_dev, 0, ti->len, data);
3904 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3906 struct pool_c *pt = ti->private;
3907 struct pool *pool = pt->pool;
3908 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3911 * If max_sectors is smaller than pool->sectors_per_block adjust it
3912 * to the highest possible power-of-2 factor of pool->sectors_per_block.
3913 * This is especially beneficial when the pool's data device is a RAID
3914 * device that has a full stripe width that matches pool->sectors_per_block
3915 * -- because even though partial RAID stripe-sized IOs will be issued to a
3916 * single RAID stripe, when aggregated they will end on a full RAID stripe
3917 * boundary, which avoids cascading additional partial RAID stripe writes.
3919 if (limits->max_sectors < pool->sectors_per_block) {
3920 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3921 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3922 limits->max_sectors--;
3923 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
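/*
 * Worked example (illustrative): with a 1.5MiB block size
 * (sectors_per_block = 3072) and a stacked max_sectors of 2048,
 * 2048 does not divide 3072, so it is decremented to 2047 and then
 * rounded down to the power of two 1024, which does divide 3072 and
 * becomes the new max_sectors.
 */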
3928 * If the system-determined stacked limits are compatible with the
3929 * pool's blocksize (io_opt is a factor) do not override them.
3931 if (io_opt_sectors < pool->sectors_per_block ||
3932 !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3933 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3934 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3936 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3937 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3941 * pt->adjusted_pf is a staging area for the actual features to use.
3942 * They get transferred to the live pool in bind_control_target()
3943 * called from pool_preresume().
3945 if (!pt->adjusted_pf.discard_enabled) {
3947 * Must explicitly disallow stacking discard limits otherwise the
3948 * block layer will stack them if pool's data device has support.
3949 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3950 * user to see that, so make sure to set all discard limits to 0.
3952 limits->discard_granularity = 0;
3956 disable_passdown_if_not_supported(pt);
3959 * The pool uses the same discard limits as the underlying data
3960 * device. DM core has already set this up.
3964 static struct target_type pool_target = {
3965 .name = "thin-pool",
3966 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3967 DM_TARGET_IMMUTABLE,
3968 .version = {1, 19, 0},
3969 .module = THIS_MODULE,
3973 .presuspend = pool_presuspend,
3974 .presuspend_undo = pool_presuspend_undo,
3975 .postsuspend = pool_postsuspend,
3976 .preresume = pool_preresume,
3977 .resume = pool_resume,
3978 .message = pool_message,
3979 .status = pool_status,
3980 .iterate_devices = pool_iterate_devices,
3981 .io_hints = pool_io_hints,
3984 /*----------------------------------------------------------------
3985 * Thin target methods
3986 *--------------------------------------------------------------*/
3987 static void thin_get(struct thin_c *tc)
3989 atomic_inc(&tc->refcount);
3992 static void thin_put(struct thin_c *tc)
3994 if (atomic_dec_and_test(&tc->refcount))
3995 complete(&tc->can_destroy);
3998 static void thin_dtr(struct dm_target *ti)
4000 struct thin_c *tc = ti->private;
4001 unsigned long flags;
4003 spin_lock_irqsave(&tc->pool->lock, flags);
4004 list_del_rcu(&tc->list);
4005 spin_unlock_irqrestore(&tc->pool->lock, flags);
4009 wait_for_completion(&tc->can_destroy);
4011 mutex_lock(&dm_thin_pool_table.mutex);
4013 __pool_dec(tc->pool);
4014 dm_pool_close_thin_device(tc->td);
4015 dm_put_device(ti, tc->pool_dev);
4017 dm_put_device(ti, tc->origin_dev);
4020 mutex_unlock(&dm_thin_pool_table.mutex);
4024 * Thin target parameters:
4026 * <pool_dev> <dev_id> [origin_dev]
4028 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
4029 * dev_id: the internal device identifier
4030 * origin_dev: a device external to the pool that should act as the origin
4032 * If the pool device has discards disabled, they get disabled for the thin device as well.
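/*
 * Example (illustrative): after "dmsetup message /dev/mapper/pool 0
 * 'create_thin 0'", a 1GiB thin volume backed by device id 0 could be
 * activated with:
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"
 *
 * An external origin device, if any, is given as an optional third
 * argument.
 */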
4035 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
4039 struct dm_dev *pool_dev, *origin_dev;
4040 struct mapped_device *pool_md;
4041 unsigned long flags;
4043 mutex_lock(&dm_thin_pool_table.mutex);
4045 if (argc != 2 && argc != 3) {
4046 ti->error = "Invalid argument count";
4051 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
4053 ti->error = "Out of memory";
4057 tc->thin_md = dm_table_get_md(ti->table);
4058 spin_lock_init(&tc->lock);
4059 INIT_LIST_HEAD(&tc->deferred_cells);
4060 bio_list_init(&tc->deferred_bio_list);
4061 bio_list_init(&tc->retry_on_resume_list);
4062 tc->sort_bio_list = RB_ROOT;
4065 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
4067 ti->error = "Error opening origin device";
4068 goto bad_origin_dev;
4070 tc->origin_dev = origin_dev;
4073 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
4075 ti->error = "Error opening pool device";
4078 tc->pool_dev = pool_dev;
4080 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4081 ti->error = "Invalid device id";
4086 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4088 ti->error = "Couldn't get pool mapped device";
4093 tc->pool = __pool_table_lookup(pool_md);
4095 ti->error = "Couldn't find pool object";
4097 goto bad_pool_lookup;
4099 __pool_inc(tc->pool);
4101 if (get_pool_mode(tc->pool) == PM_FAIL) {
4102 ti->error = "Couldn't open thin device, Pool is in fail mode";
4107 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4109 ti->error = "Couldn't open thin internal device";
4113 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4117 ti->num_flush_bios = 1;
4118 ti->flush_supported = true;
4119 ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
4121 /* In case the pool supports discards, pass them on. */
4122 if (tc->pool->pf.discard_enabled) {
4123 ti->discards_supported = true;
4124 ti->num_discard_bios = 1;
4125 ti->split_discard_bios = false;
4128 mutex_unlock(&dm_thin_pool_table.mutex);
4130 spin_lock_irqsave(&tc->pool->lock, flags);
4131 if (tc->pool->suspended) {
4132 spin_unlock_irqrestore(&tc->pool->lock, flags);
4133 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
4134 ti->error = "Unable to activate thin device while pool is suspended";
4138 atomic_set(&tc->refcount, 1);
4139 init_completion(&tc->can_destroy);
4140 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4141 spin_unlock_irqrestore(&tc->pool->lock, flags);
4143 * This synchronize_rcu() call is needed here otherwise we risk a
4144 * wake_worker() call finding no bios to process (because the newly
4145 * added tc isn't yet visible). So this reduces latency since we
4146 * aren't then dependent on the periodic commit to wake_worker().
4155 dm_pool_close_thin_device(tc->td);
4157 __pool_dec(tc->pool);
4161 dm_put_device(ti, tc->pool_dev);
4164 dm_put_device(ti, tc->origin_dev);
4168 mutex_unlock(&dm_thin_pool_table.mutex);
4173 static int thin_map(struct dm_target *ti, struct bio *bio)
4175 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
4177 return thin_bio_map(ti, bio);
4180 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
4182 unsigned long flags;
4183 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
4184 struct list_head work;
4185 struct dm_thin_new_mapping *m, *tmp;
4186 struct pool *pool = h->tc->pool;
4188 if (h->shared_read_entry) {
4189 INIT_LIST_HEAD(&work);
4190 dm_deferred_entry_dec(h->shared_read_entry, &work);
4192 spin_lock_irqsave(&pool->lock, flags);
4193 list_for_each_entry_safe(m, tmp, &work, list) {
4195 __complete_mapping_preparation(m);
4197 spin_unlock_irqrestore(&pool->lock, flags);
4200 if (h->all_io_entry) {
4201 INIT_LIST_HEAD(&work);
4202 dm_deferred_entry_dec(h->all_io_entry, &work);
4203 if (!list_empty(&work)) {
4204 spin_lock_irqsave(&pool->lock, flags);
4205 list_for_each_entry_safe(m, tmp, &work, list)
4206 list_add_tail(&m->list, &pool->prepared_discards);
4207 spin_unlock_irqrestore(&pool->lock, flags);
4213 cell_defer_no_holder(h->tc, h->cell);
4218 static void thin_presuspend(struct dm_target *ti)
4220 struct thin_c *tc = ti->private;
4222 if (dm_noflush_suspending(ti))
4223 noflush_work(tc, do_noflush_start);
4226 static void thin_postsuspend(struct dm_target *ti)
4228 struct thin_c *tc = ti->private;
4231 * The dm_noflush_suspending flag has been cleared by now, so
4232 * unfortunately we must always run this.
4234 noflush_work(tc, do_noflush_stop);
4237 static int thin_preresume(struct dm_target *ti)
4239 struct thin_c *tc = ti->private;
4242 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4248 * <nr mapped sectors> <highest mapped sector>
4250 static void thin_status(struct dm_target *ti, status_type_t type,
4251 unsigned status_flags, char *result, unsigned maxlen)
4255 dm_block_t mapped, highest;
4256 char buf[BDEVNAME_SIZE];
4257 struct thin_c *tc = ti->private;
4259 if (get_pool_mode(tc->pool) == PM_FAIL) {
4268 case STATUSTYPE_INFO:
4269 r = dm_thin_get_mapped_count(tc->td, &mapped);
4271 DMERR("dm_thin_get_mapped_count returned %d", r);
4275 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
4277 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
4281 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4283 DMEMIT("%llu", ((highest + 1) *
4284 tc->pool->sectors_per_block) - 1);
4289 case STATUSTYPE_TABLE:
4291 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4292 (unsigned long) tc->dev_id);
4294 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
4305 static int thin_iterate_devices(struct dm_target *ti,
4306 iterate_devices_callout_fn fn, void *data)
4309 struct thin_c *tc = ti->private;
4310 struct pool *pool = tc->pool;
4313 * We can't call dm_pool_get_data_dev_size() since that blocks. So
4314 * we follow a more convoluted path through to the pool's target.
4317 return 0; /* nothing is bound */
4319 blocks = pool->ti->len;
4320 (void) sector_div(blocks, pool->sectors_per_block);
4322 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4327 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4329 struct thin_c *tc = ti->private;
4330 struct pool *pool = tc->pool;
4332 if (!pool->pf.discard_enabled)
4335 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4336 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
4339 static struct target_type thin_target = {
4341 .version = {1, 19, 0},
4342 .module = THIS_MODULE,
4346 .end_io = thin_endio,
4347 .preresume = thin_preresume,
4348 .presuspend = thin_presuspend,
4349 .postsuspend = thin_postsuspend,
4350 .status = thin_status,
4351 .iterate_devices = thin_iterate_devices,
4352 .io_hints = thin_io_hints,
4355 /*----------------------------------------------------------------*/
4357 static int __init dm_thin_init(void)
4363 r = dm_register_target(&thin_target);
4367 r = dm_register_target(&pool_target);
4369 goto bad_pool_target;
4373 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4374 if (!_new_mapping_cache)
4375 goto bad_new_mapping_cache;
4379 bad_new_mapping_cache:
4380 dm_unregister_target(&pool_target);
4382 dm_unregister_target(&thin_target);
4387 static void dm_thin_exit(void)
4389 dm_unregister_target(&thin_target);
4390 dm_unregister_target(&pool_target);
4392 kmem_cache_destroy(_new_mapping_cache);
4395 module_init(dm_thin_init);
4396 module_exit(dm_thin_exit);
4398 module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4399 MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
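/*
 * Example (illustrative; the sysfs path assumes the module is named
 * dm_thin_pool): the out-of-space timeout can be set at load time or
 * changed at runtime, e.g.:
 *
 *   modprobe dm-thin-pool no_space_timeout=120
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * A value of 0 disables the timeout, so the pool queues IO
 * indefinitely while out of data space.
 */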
4401 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4402 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4403 MODULE_LICENSE("GPL");