2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/idr.h>
20 #include <linux/hdreg.h>
21 #include <linux/delay.h>
22 #include <linux/wait.h>
23 #include <linux/kthread.h>
24 #include <linux/ktime.h>
25 #include <linux/elevator.h> /* for rq_end_sector() */
26 #include <linux/blk-mq.h>
28 #include <trace/events/block.h>
30 #define DM_MSG_PREFIX "core"
34 * ratelimit state to be used in DMXXX_LIMIT().
36 DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
37 DEFAULT_RATELIMIT_INTERVAL,
38 DEFAULT_RATELIMIT_BURST);
39 EXPORT_SYMBOL(dm_ratelimit_state);
43 * Cookies are numeric values sent with CHANGE and REMOVE
44 * uevents while resuming, removing or renaming the device.
46 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
47 #define DM_COOKIE_LENGTH 24
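/*
 * Illustrative sketch (an assumption, mirroring how dm_kobject_uevent()
 * formats the cookie elsewhere in this file): the cookie is passed to
 * userspace as a uevent environment variable, and DM_COOKIE_LENGTH bounds
 * the formatted "NAME=value" string.
 *
 *	char udev_cookie[DM_COOKIE_LENGTH];
 *
 *	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
 *		 DM_COOKIE_ENV_VAR_NAME, cookie);
 */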
49 static const char *_name = DM_NAME;
51 static unsigned int major = 0;
52 static unsigned int _major = 0;
54 static DEFINE_IDR(_minor_idr);
56 static DEFINE_SPINLOCK(_minor_lock);
58 static void do_deferred_remove(struct work_struct *w);
60 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
62 static struct workqueue_struct *deferred_remove_workqueue;
66 * One of these is allocated per bio.
69 struct mapped_device *md;
73 unsigned long start_time;
74 spinlock_t endio_lock;
75 struct dm_stats_aux stats_aux;
79 * For request-based dm.
80 * One of these is allocated per request.
82 struct dm_rq_target_io {
83 struct mapped_device *md;
85 struct request *orig, *clone;
86 struct kthread_work work;
89 struct dm_stats_aux stats_aux;
90 unsigned long duration_jiffies;
95 * For request-based dm - the bio clones we allocate are embedded in these
96 * structs.
98 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
99 * the bioset is created - this means the bio has to come at the end of the
100 * struct.
102 struct dm_rq_clone_bio_info {
104 struct dm_rq_target_io *tio;
108 union map_info *dm_get_rq_mapinfo(struct request *rq)
110 if (rq && rq->end_io_data)
111 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
114 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
116 #define MINOR_ALLOCED ((void *)-1)
119 * Bits for the md->flags field.
121 #define DMF_BLOCK_IO_FOR_SUSPEND 0
122 #define DMF_SUSPENDED 1
124 #define DMF_FREEING 3
125 #define DMF_DELETING 4
126 #define DMF_NOFLUSH_SUSPENDING 5
127 #define DMF_DEFERRED_REMOVE 6
128 #define DMF_SUSPENDED_INTERNALLY 7
131 * A dummy definition to make RCU happy.
132 * struct dm_table should never be dereferenced in this file.
139 * Work processed by per-device workqueue.
141 struct mapped_device {
142 struct srcu_struct io_barrier;
143 struct mutex suspend_lock;
148 * The current mapping.
149 * Use dm_get_live_table{_fast} or take suspend_lock for
150 * dereference.
152 struct dm_table __rcu *map;
154 struct list_head table_devices;
155 struct mutex table_devices_lock;
159 struct request_queue *queue;
161 /* Protect queue and type against concurrent access. */
162 struct mutex type_lock;
164 struct target_type *immutable_target_type;
166 struct gendisk *disk;
172 * A list of ios that arrived while we were suspended.
175 wait_queue_head_t wait;
176 struct work_struct work;
177 struct bio_list deferred;
178 spinlock_t deferred_lock;
181 * Processing queue (flush)
183 struct workqueue_struct *wq;
186 * io objects are allocated from here.
197 wait_queue_head_t eventq;
199 struct list_head uevent_list;
200 spinlock_t uevent_lock; /* Protect access to uevent_list */
203 * freeze/thaw support requires holding onto a super block
205 struct super_block *frozen_sb;
206 struct block_device *bdev;
208 /* forced geometry settings */
209 struct hd_geometry geometry;
211 /* kobject and completion */
212 struct dm_kobject_holder kobj_holder;
214 /* zero-length flush that will be cloned and submitted to targets */
215 struct bio flush_bio;
217 /* the number of internal suspends */
218 unsigned internal_suspend_count;
220 struct dm_stats stats;
222 struct kthread_worker kworker;
223 struct task_struct *kworker_task;
225 /* for request-based merge heuristic in dm_request_fn() */
226 unsigned seq_rq_merge_deadline_usecs;
228 sector_t last_rq_pos;
229 ktime_t last_rq_start_time;
231 /* for blk-mq request-based DM support */
232 struct blk_mq_tag_set tag_set;
236 #ifdef CONFIG_DM_MQ_DEFAULT
237 static bool use_blk_mq = true;
239 static bool use_blk_mq = false;
242 bool dm_use_blk_mq(struct mapped_device *md)
244 return md->use_blk_mq;
248 * For pre-allocation of mempools at table loading time.
250 struct dm_md_mempools {
256 struct table_device {
257 struct list_head list;
259 struct dm_dev dm_dev;
262 #define RESERVED_BIO_BASED_IOS 16
263 #define RESERVED_REQUEST_BASED_IOS 256
264 #define RESERVED_MAX_IOS 1024
265 static struct kmem_cache *_io_cache;
266 static struct kmem_cache *_rq_tio_cache;
267 static struct kmem_cache *_rq_cache;
270 * Bio-based DM's mempools' reserved IOs set by the user.
272 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
275 * Request-based DM's mempools' reserved IOs set by the user.
277 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
279 static unsigned __dm_get_module_param(unsigned *module_param,
280 unsigned def, unsigned max)
282 unsigned param = ACCESS_ONCE(*module_param);
283 unsigned modified_param = 0;
286 modified_param = def;
287 else if (param > max)
288 modified_param = max;
290 if (modified_param) {
291 (void)cmpxchg(module_param, param, modified_param);
292 param = modified_param;
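/*
 * Worked example for the clamping above (illustrative): with def = 16 and
 * max = 1024, a stored value of 0 is corrected to 16 and a value of 4096
 * is corrected to 1024.  The cmpxchg() publishes the corrected value only
 * if the module parameter still holds the value we read, so a concurrent
 * writer is never overwritten; either way the corrected value is returned.
 */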
298 unsigned dm_get_reserved_bio_based_ios(void)
300 return __dm_get_module_param(&reserved_bio_based_ios,
301 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
303 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
305 unsigned dm_get_reserved_rq_based_ios(void)
307 return __dm_get_module_param(&reserved_rq_based_ios,
308 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
310 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
312 static int __init local_init(void)
316 /* allocate a slab for the dm_ios */
317 _io_cache = KMEM_CACHE(dm_io, 0);
321 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
323 goto out_free_io_cache;
325 _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
326 __alignof__(struct request), 0, NULL);
328 goto out_free_rq_tio_cache;
330 r = dm_uevent_init();
332 goto out_free_rq_cache;
334 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
335 if (!deferred_remove_workqueue) {
337 goto out_uevent_exit;
341 r = register_blkdev(_major, _name);
343 goto out_free_workqueue;
351 destroy_workqueue(deferred_remove_workqueue);
355 kmem_cache_destroy(_rq_cache);
356 out_free_rq_tio_cache:
357 kmem_cache_destroy(_rq_tio_cache);
359 kmem_cache_destroy(_io_cache);
364 static void local_exit(void)
366 flush_scheduled_work();
367 destroy_workqueue(deferred_remove_workqueue);
369 kmem_cache_destroy(_rq_cache);
370 kmem_cache_destroy(_rq_tio_cache);
371 kmem_cache_destroy(_io_cache);
372 unregister_blkdev(_major, _name);
377 DMINFO("cleaned up");
380 static int (*_inits[])(void) __initdata = {
391 static void (*_exits[])(void) = {
402 static int __init dm_init(void)
404 const int count = ARRAY_SIZE(_inits);
408 for (i = 0; i < count; i++) {
423 static void __exit dm_exit(void)
425 int i = ARRAY_SIZE(_exits);
431 * Should be empty by this point.
433 idr_destroy(&_minor_idr);
437 * Block device functions
439 int dm_deleting_md(struct mapped_device *md)
441 return test_bit(DMF_DELETING, &md->flags);
444 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
446 struct mapped_device *md;
448 spin_lock(&_minor_lock);
450 md = bdev->bd_disk->private_data;
454 if (test_bit(DMF_FREEING, &md->flags) ||
455 dm_deleting_md(md)) {
461 atomic_inc(&md->open_count);
463 spin_unlock(&_minor_lock);
465 return md ? 0 : -ENXIO;
468 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
470 struct mapped_device *md;
472 spin_lock(&_minor_lock);
474 md = disk->private_data;
478 if (atomic_dec_and_test(&md->open_count) &&
479 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
480 queue_work(deferred_remove_workqueue, &deferred_remove_work);
484 spin_unlock(&_minor_lock);
487 int dm_open_count(struct mapped_device *md)
489 return atomic_read(&md->open_count);
493 * Guarantees nothing is using the device before it's deleted.
495 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
499 spin_lock(&_minor_lock);
501 if (dm_open_count(md)) {
504 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
505 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
508 set_bit(DMF_DELETING, &md->flags);
510 spin_unlock(&_minor_lock);
515 int dm_cancel_deferred_remove(struct mapped_device *md)
519 spin_lock(&_minor_lock);
521 if (test_bit(DMF_DELETING, &md->flags))
524 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
526 spin_unlock(&_minor_lock);
531 static void do_deferred_remove(struct work_struct *w)
533 dm_deferred_remove();
536 sector_t dm_get_size(struct mapped_device *md)
538 return get_capacity(md->disk);
541 struct request_queue *dm_get_md_queue(struct mapped_device *md)
546 struct dm_stats *dm_get_stats(struct mapped_device *md)
551 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
553 struct mapped_device *md = bdev->bd_disk->private_data;
555 return dm_get_geometry(md, geo);
558 static int dm_get_live_table_for_ioctl(struct mapped_device *md,
559 struct dm_target **tgt, struct block_device **bdev,
560 fmode_t *mode, int *srcu_idx)
562 struct dm_table *map;
567 map = dm_get_live_table(md, srcu_idx);
568 if (!map || !dm_table_get_size(map))
571 /* We only support devices that have a single target */
572 if (dm_table_get_num_targets(map) != 1)
575 *tgt = dm_table_get_target(map, 0);
577 if (!(*tgt)->type->prepare_ioctl)
580 if (dm_suspended_md(md)) {
585 r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
592 dm_put_live_table(md, *srcu_idx);
593 if (r == -ENOTCONN) {
600 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
601 unsigned int cmd, unsigned long arg)
603 struct mapped_device *md = bdev->bd_disk->private_data;
604 struct dm_target *tgt;
607 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
613 * Target determined this ioctl is being issued against
614 * a logical partition of the parent bdev, so extra
615 * validation is needed.
617 r = scsi_verify_blk_ioctl(NULL, cmd);
622 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
624 dm_put_live_table(md, srcu_idx);
628 static struct dm_io *alloc_io(struct mapped_device *md)
630 return mempool_alloc(md->io_pool, GFP_NOIO);
633 static void free_io(struct mapped_device *md, struct dm_io *io)
635 mempool_free(io, md->io_pool);
638 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
640 bio_put(&tio->clone);
643 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
646 return mempool_alloc(md->io_pool, gfp_mask);
649 static void free_rq_tio(struct dm_rq_target_io *tio)
651 mempool_free(tio, tio->md->io_pool);
654 static struct request *alloc_clone_request(struct mapped_device *md,
657 return mempool_alloc(md->rq_pool, gfp_mask);
660 static void free_clone_request(struct mapped_device *md, struct request *rq)
662 mempool_free(rq, md->rq_pool);
665 static int md_in_flight(struct mapped_device *md)
667 return atomic_read(&md->pending[READ]) +
668 atomic_read(&md->pending[WRITE]);
671 static void start_io_acct(struct dm_io *io)
673 struct mapped_device *md = io->md;
674 struct bio *bio = io->bio;
676 int rw = bio_data_dir(bio);
678 io->start_time = jiffies;
680 cpu = part_stat_lock();
681 part_round_stats(cpu, &dm_disk(md)->part0);
683 atomic_set(&dm_disk(md)->part0.in_flight[rw],
684 atomic_inc_return(&md->pending[rw]));
686 if (unlikely(dm_stats_used(&md->stats)))
687 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
688 bio_sectors(bio), false, 0, &io->stats_aux);
691 static void end_io_acct(struct dm_io *io)
693 struct mapped_device *md = io->md;
694 struct bio *bio = io->bio;
695 unsigned long duration = jiffies - io->start_time;
697 int rw = bio_data_dir(bio);
699 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
701 if (unlikely(dm_stats_used(&md->stats)))
702 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
703 bio_sectors(bio), true, duration, &io->stats_aux);
706 * After this is decremented the bio must not be touched if it is
707 * a flush.
709 pending = atomic_dec_return(&md->pending[rw]);
710 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
711 pending += atomic_read(&md->pending[rw^0x1]);
713 /* nudge anyone waiting on suspend queue */
719 * Add the bio to the list of deferred io.
721 static void queue_io(struct mapped_device *md, struct bio *bio)
725 spin_lock_irqsave(&md->deferred_lock, flags);
726 bio_list_add(&md->deferred, bio);
727 spin_unlock_irqrestore(&md->deferred_lock, flags);
728 queue_work(md->wq, &md->work);
732 * Everyone (including functions in this file) should use this
733 * function to access the md->map field, and make sure they call
734 * dm_put_live_table() when finished.
736 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
738 *srcu_idx = srcu_read_lock(&md->io_barrier);
740 return srcu_dereference(md->map, &md->io_barrier);
743 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
745 srcu_read_unlock(&md->io_barrier, srcu_idx);
748 void dm_sync_table(struct mapped_device *md)
750 synchronize_srcu(&md->io_barrier);
751 synchronize_rcu_expedited();
755 * A fast alternative to dm_get_live_table/dm_put_live_table.
756 * The caller must not block between these two functions.
758 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
761 return rcu_dereference(md->map);
764 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
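/*
 * Usage sketch for the fast variant above (illustrative): the caller must
 * not block between the two calls, because they bracket a plain RCU
 * read-side critical section.
 *
 *	struct dm_table *map = dm_get_live_table_fast(md);
 *	if (map)
 *		... non-blocking inspection of the table only ...
 *	dm_put_live_table_fast(md);
 */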
770 * Open a table device so we can use it as a map destination.
772 static int open_table_device(struct table_device *td, dev_t dev,
773 struct mapped_device *md)
775 static char *_claim_ptr = "I belong to device-mapper";
776 struct block_device *bdev;
780 BUG_ON(td->dm_dev.bdev);
782 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
784 return PTR_ERR(bdev);
786 r = bd_link_disk_holder(bdev, dm_disk(md));
788 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
792 td->dm_dev.bdev = bdev;
797 * Close a table device that we've been using.
799 static void close_table_device(struct table_device *td, struct mapped_device *md)
801 if (!td->dm_dev.bdev)
804 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
805 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
806 td->dm_dev.bdev = NULL;
809 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
811 struct table_device *td;
813 list_for_each_entry(td, l, list)
814 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
820 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
821 struct dm_dev **result) {
823 struct table_device *td;
825 mutex_lock(&md->table_devices_lock);
826 td = find_table_device(&md->table_devices, dev, mode);
828 td = kmalloc(sizeof(*td), GFP_KERNEL);
830 mutex_unlock(&md->table_devices_lock);
834 td->dm_dev.mode = mode;
835 td->dm_dev.bdev = NULL;
837 if ((r = open_table_device(td, dev, md))) {
838 mutex_unlock(&md->table_devices_lock);
843 format_dev_t(td->dm_dev.name, dev);
845 atomic_set(&td->count, 0);
846 list_add(&td->list, &md->table_devices);
848 atomic_inc(&td->count);
849 mutex_unlock(&md->table_devices_lock);
851 *result = &td->dm_dev;
854 EXPORT_SYMBOL_GPL(dm_get_table_device);
856 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
858 struct table_device *td = container_of(d, struct table_device, dm_dev);
860 mutex_lock(&md->table_devices_lock);
861 if (atomic_dec_and_test(&td->count)) {
862 close_table_device(td, md);
866 mutex_unlock(&md->table_devices_lock);
868 EXPORT_SYMBOL(dm_put_table_device);
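/*
 * Pairing sketch (illustrative): a target that needs a reference to an
 * underlying device takes and drops it around the table's lifetime.
 * dm_get_table_device() returns 0 on success and reuses an existing entry
 * for the same dev_t/mode pair, bumping its reference count.
 *
 *	struct dm_dev *ddev;
 *
 *	if (!dm_get_table_device(md, dev, mode, &ddev)) {
 *		... use ddev->bdev ...
 *		dm_put_table_device(md, ddev);
 *	}
 */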
870 static void free_table_devices(struct list_head *devices)
872 struct list_head *tmp, *next;
874 list_for_each_safe(tmp, next, devices) {
875 struct table_device *td = list_entry(tmp, struct table_device, list);
877 DMWARN("dm_destroy: %s still exists with %d references",
878 td->dm_dev.name, atomic_read(&td->count));
884 * Get the geometry associated with a dm device
886 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
894 * Set the geometry of a device.
896 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
898 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
900 if (geo->start > sz) {
901 DMWARN("Start sector is beyond the geometry limits.");
910 /*-----------------------------------------------------------------
911 * CRUD START:
912 * A more elegant solution is in the works that uses the queue
913 * merge fn, unfortunately there are a couple of changes to
914 * the block layer that I want to make for this. So in the
915 * interests of getting something for people to use I give
916 * you this clearly demarcated crap.
917 *---------------------------------------------------------------*/
919 static int __noflush_suspending(struct mapped_device *md)
921 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
925 * Decrements the number of outstanding ios that a bio has been
926 * cloned into, completing the original io if necessary.
928 static void dec_pending(struct dm_io *io, int error)
933 struct mapped_device *md = io->md;
935 /* Push-back supersedes any I/O errors */
936 if (unlikely(error)) {
937 spin_lock_irqsave(&io->endio_lock, flags);
938 if (!(io->error > 0 && __noflush_suspending(md)))
940 spin_unlock_irqrestore(&io->endio_lock, flags);
943 if (atomic_dec_and_test(&io->io_count)) {
944 if (io->error == DM_ENDIO_REQUEUE) {
946 * Target requested pushing back the I/O.
948 spin_lock_irqsave(&md->deferred_lock, flags);
949 if (__noflush_suspending(md))
950 bio_list_add_head(&md->deferred, io->bio);
952 /* noflush suspend was interrupted. */
954 spin_unlock_irqrestore(&md->deferred_lock, flags);
957 io_error = io->error;
962 if (io_error == DM_ENDIO_REQUEUE)
965 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
967 * Preflush done for flush with data, reissue
968 * without REQ_FLUSH.
970 bio->bi_rw &= ~REQ_FLUSH;
973 /* done with normal IO or empty flush */
974 trace_block_bio_complete(md->queue, bio, io_error);
975 bio->bi_error = io_error;
981 static void disable_write_same(struct mapped_device *md)
983 struct queue_limits *limits = dm_get_queue_limits(md);
985 /* device doesn't really support WRITE SAME, disable it */
986 limits->max_write_same_sectors = 0;
989 static void clone_endio(struct bio *bio)
991 int error = bio->bi_error;
993 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
994 struct dm_io *io = tio->io;
995 struct mapped_device *md = tio->io->md;
996 dm_endio_fn endio = tio->ti->type->end_io;
999 r = endio(tio->ti, bio, error);
1000 if (r < 0 || r == DM_ENDIO_REQUEUE)
1002 * error and requeue request are handled
1003 * in dec_pending().
1006 else if (r == DM_ENDIO_INCOMPLETE)
1007 /* The target will handle the io */
1010 DMWARN("unimplemented target endio return value: %d", r);
1015 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
1016 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
1017 disable_write_same(md);
1020 dec_pending(io, error);
1024 * Partial completion handling for request-based dm
1026 static void end_clone_bio(struct bio *clone)
1028 struct dm_rq_clone_bio_info *info =
1029 container_of(clone, struct dm_rq_clone_bio_info, clone);
1030 struct dm_rq_target_io *tio = info->tio;
1031 struct bio *bio = info->orig;
1032 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
1038 * An error has already been detected on the request.
1039 * Once an error has occurred, just let clone->end_io() handle
1040 * the error.
1043 else if (bio->bi_error) {
1045 * Don't report the error to the upper layer yet.
1046 * The error handling decision is made by the target driver
1047 * when the request is completed.
1049 tio->error = bio->bi_error;
1054 * I/O for the bio successfully completed.
1055 * Report the data completion to the upper layer.
1059 * bios are processed from the head of the list.
1060 * So the completing bio should always be rq->bio.
1061 * If it's not, something is wrong.
1063 if (tio->orig->bio != bio)
1064 DMERR("bio completion is going in the middle of the request");
1067 * Update the original request.
1068 * Do not use blk_end_request() here, because it may complete
1069 * the original request before the clone, and break the ordering.
1071 blk_update_request(tio->orig, 0, nr_bytes);
1074 static struct dm_rq_target_io *tio_from_request(struct request *rq)
1076 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
1079 static void rq_end_stats(struct mapped_device *md, struct request *orig)
1081 if (unlikely(dm_stats_used(&md->stats))) {
1082 struct dm_rq_target_io *tio = tio_from_request(orig);
1083 tio->duration_jiffies = jiffies - tio->duration_jiffies;
1084 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
1085 tio->n_sectors, true, tio->duration_jiffies,
1091 * Don't touch any member of the md after calling this function because
1092 * the md may be freed in dm_put() at the end of this function.
1093 * Or do dm_get() before calling this function and dm_put() later.
1095 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1097 atomic_dec(&md->pending[rw]);
1099 /* nudge anyone waiting on suspend queue */
1100 if (!md_in_flight(md))
1104 * Run this off this callpath, as drivers could invoke end_io while
1105 * inside their request_fn (and holding the queue lock). Calling
1106 * back into ->request_fn() could deadlock attempting to grab the
1107 * queue lock again.
1110 if (md->queue->mq_ops)
1111 blk_mq_run_hw_queues(md->queue, true);
1113 blk_run_queue_async(md->queue);
1117 * dm_put() must be at the end of this function. See the comment above
1122 static void free_rq_clone(struct request *clone)
1124 struct dm_rq_target_io *tio = clone->end_io_data;
1125 struct mapped_device *md = tio->md;
1127 blk_rq_unprep_clone(clone);
1129 if (md->type == DM_TYPE_MQ_REQUEST_BASED)
1130 /* stacked on blk-mq queue(s) */
1131 tio->ti->type->release_clone_rq(clone);
1132 else if (!md->queue->mq_ops)
1133 /* request_fn queue stacked on request_fn queue(s) */
1134 free_clone_request(md, clone);
1136 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
1137 * no need to call free_clone_request() because we leverage blk-mq by
1138 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
1141 if (!md->queue->mq_ops)
1146 * Complete the clone and the original request.
1147 * Must be called without clone's queue lock held,
1148 * see end_clone_request() for more details.
1150 static void dm_end_request(struct request *clone, int error)
1152 int rw = rq_data_dir(clone);
1153 struct dm_rq_target_io *tio = clone->end_io_data;
1154 struct mapped_device *md = tio->md;
1155 struct request *rq = tio->orig;
1157 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1158 rq->errors = clone->errors;
1159 rq->resid_len = clone->resid_len;
1163 * We are using the sense buffer of the original
1164 * request.
1165 * So setting the length of the sense data is enough.
1167 rq->sense_len = clone->sense_len;
1170 free_rq_clone(clone);
1171 rq_end_stats(md, rq);
1173 blk_end_request_all(rq, error);
1175 blk_mq_end_request(rq, error);
1176 rq_completed(md, rw, true);
1179 static void dm_unprep_request(struct request *rq)
1181 struct dm_rq_target_io *tio = tio_from_request(rq);
1182 struct request *clone = tio->clone;
1184 if (!rq->q->mq_ops) {
1186 rq->cmd_flags &= ~REQ_DONTPREP;
1190 free_rq_clone(clone);
1194 * Requeue the original request of a clone.
1196 static void old_requeue_request(struct request *rq)
1198 struct request_queue *q = rq->q;
1199 unsigned long flags;
1201 spin_lock_irqsave(q->queue_lock, flags);
1202 blk_requeue_request(q, rq);
1203 blk_run_queue_async(q);
1204 spin_unlock_irqrestore(q->queue_lock, flags);
1207 static void dm_requeue_original_request(struct mapped_device *md,
1210 int rw = rq_data_dir(rq);
1212 dm_unprep_request(rq);
1214 rq_end_stats(md, rq);
1216 old_requeue_request(rq);
1218 blk_mq_requeue_request(rq);
1219 blk_mq_kick_requeue_list(rq->q);
1222 rq_completed(md, rw, false);
1225 static void old_stop_queue(struct request_queue *q)
1227 unsigned long flags;
1229 if (blk_queue_stopped(q))
1232 spin_lock_irqsave(q->queue_lock, flags);
1234 spin_unlock_irqrestore(q->queue_lock, flags);
1237 static void stop_queue(struct request_queue *q)
1242 blk_mq_stop_hw_queues(q);
1245 static void old_start_queue(struct request_queue *q)
1247 unsigned long flags;
1249 spin_lock_irqsave(q->queue_lock, flags);
1250 if (blk_queue_stopped(q))
1252 spin_unlock_irqrestore(q->queue_lock, flags);
1255 static void start_queue(struct request_queue *q)
1260 blk_mq_start_stopped_hw_queues(q, true);
1263 static void dm_done(struct request *clone, int error, bool mapped)
1266 struct dm_rq_target_io *tio = clone->end_io_data;
1267 dm_request_endio_fn rq_end_io = NULL;
1270 rq_end_io = tio->ti->type->rq_end_io;
1272 if (mapped && rq_end_io)
1273 r = rq_end_io(tio->ti, clone, error, &tio->info);
1276 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
1277 !clone->q->limits.max_write_same_sectors))
1278 disable_write_same(tio->md);
1281 /* The target wants to complete the I/O */
1282 dm_end_request(clone, r);
1283 else if (r == DM_ENDIO_INCOMPLETE)
1284 /* The target will handle the I/O */
1286 else if (r == DM_ENDIO_REQUEUE)
1287 /* The target wants to requeue the I/O */
1288 dm_requeue_original_request(tio->md, tio->orig);
1290 DMWARN("unimplemented target endio return value: %d", r);
1296 * Request completion handler for request-based dm
1298 static void dm_softirq_done(struct request *rq)
1301 struct dm_rq_target_io *tio = tio_from_request(rq);
1302 struct request *clone = tio->clone;
1306 rq_end_stats(tio->md, rq);
1307 rw = rq_data_dir(rq);
1308 if (!rq->q->mq_ops) {
1309 blk_end_request_all(rq, tio->error);
1310 rq_completed(tio->md, rw, false);
1313 blk_mq_end_request(rq, tio->error);
1314 rq_completed(tio->md, rw, false);
1319 if (rq->cmd_flags & REQ_FAILED)
1322 dm_done(clone, tio->error, mapped);
1326 * Complete the clone and the original request with the error status
1327 * through softirq context.
1329 static void dm_complete_request(struct request *rq, int error)
1331 struct dm_rq_target_io *tio = tio_from_request(rq);
1334 blk_complete_request(rq);
1338 * Complete the not-mapped clone and the original request with the error status
1339 * through softirq context.
1340 * Target's rq_end_io() function isn't called.
1341 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1343 static void dm_kill_unmapped_request(struct request *rq, int error)
1345 rq->cmd_flags |= REQ_FAILED;
1346 dm_complete_request(rq, error);
1350 * Called with the clone's queue lock held (for non-blk-mq)
1352 static void end_clone_request(struct request *clone, int error)
1354 struct dm_rq_target_io *tio = clone->end_io_data;
1356 if (!clone->q->mq_ops) {
1358 * This is just for cleaning up the information of the queue in which
1359 * the clone was dispatched.
1360 * The clone is *NOT* actually freed here because it was allocated
1361 * from dm's own mempool (REQ_ALLOCED isn't set).
1363 __blk_put_request(clone->q, clone);
1367 * Actual request completion is done in a softirq context which doesn't
1368 * hold the clone's queue lock. Otherwise, deadlock could occur because:
1369 * - another request may be submitted by the upper-level driver
1370 * of the stack during the completion
1371 * - a submission that requires the queue lock may be made
1372 * against this clone's queue
1374 dm_complete_request(tio->orig, error);
1378 * Return the maximum size of I/O possible at the supplied sector, up to the
1379 * current target boundary.
1381 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1383 sector_t target_offset = dm_target_offset(ti, sector);
1385 return ti->len - target_offset;
1388 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1390 sector_t len = max_io_len_target_boundary(sector, ti);
1391 sector_t offset, max_len;
1394 * Does the target need to split even further?
1396 if (ti->max_io_len) {
1397 offset = dm_target_offset(ti, sector);
1398 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1399 max_len = sector_div(offset, ti->max_io_len);
1401 max_len = offset & (ti->max_io_len - 1);
1402 max_len = ti->max_io_len - max_len;
1411 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1413 if (len > UINT_MAX) {
1414 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1415 (unsigned long long)len, UINT_MAX);
1416 ti->error = "Maximum size of target IO is too large";
1420 ti->max_io_len = (uint32_t) len;
1424 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
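/*
 * Worked example for max_io_len() above (illustrative): with a target
 * ti->max_io_len of 8 sectors (a power of two) and a target-relative
 * offset of 21, the fast path computes 8 - (21 & 7) = 3, i.e. only three
 * sectors remain before the next 8-sector boundary.  The sector_div()
 * branch computes the same remainder for non-power-of-two lengths.
 */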
1427 * A target may call dm_accept_partial_bio only from the map routine. It is
1428 * allowed for all bio types except REQ_FLUSH.
1430 * dm_accept_partial_bio informs the dm that the target only wants to process
1431 * additional n_sectors sectors of the bio and the rest of the data should be
1432 * sent in the next bio.
1434 * A diagram that explains the arithmetic:
1435 * +--------------------+---------------+-------+
1436 * |         1          |       2       |   3   |
1437 * +--------------------+---------------+-------+
1439 * <-------------- *tio->len_ptr --------------->
1440 *                      <------- bi_size ------->
1441 *                      <-- n_sectors -->
1443 * Region 1 was already iterated over with bio_advance or similar function.
1444 * (it may be empty if the target doesn't use bio_advance)
1445 * Region 2 is the remaining bio size that the target wants to process.
1446 * (it may be empty if region 1 is non-empty, although there is no reason
1447 * to do that)
1448 * The target requires that region 3 is to be sent in the next bio.
1450 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1451 * the partially processed part (the sum of regions 1+2) must be the same for all
1452 * copies of the bio.
1454 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1456 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1457 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1458 BUG_ON(bio->bi_rw & REQ_FLUSH);
1459 BUG_ON(bi_size > *tio->len_ptr);
1460 BUG_ON(n_sectors > bi_size);
1461 *tio->len_ptr -= bi_size - n_sectors;
1462 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1464 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
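/*
 * Minimal usage sketch (hypothetical target, not part of this file): a map
 * callback that handles at most 8 sectors per bio and asks the core to
 * resubmit the remainder in a follow-up bio, per the rules above.
 */
#if 0	/* example only */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	if (bio_sectors(bio) > 8)
		dm_accept_partial_bio(bio, 8);
	bio->bi_bdev = example_dev->bdev;	/* hypothetical backing device */
	return DM_MAPIO_REMAPPED;
}
#endif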
1466 static void __map_bio(struct dm_target_io *tio)
1470 struct mapped_device *md;
1471 struct bio *clone = &tio->clone;
1472 struct dm_target *ti = tio->ti;
1474 clone->bi_end_io = clone_endio;
1477 * Map the clone. If r == 0 we don't need to do
1478 * anything, the target has assumed ownership of
1479 * this io.
1481 atomic_inc(&tio->io->io_count);
1482 sector = clone->bi_iter.bi_sector;
1483 r = ti->type->map(ti, clone);
1484 if (r == DM_MAPIO_REMAPPED) {
1485 /* the bio has been remapped so dispatch it */
1487 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1488 tio->io->bio->bi_bdev->bd_dev, sector);
1490 generic_make_request(clone);
1491 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1492 /* error the io and bail out, or requeue it if needed */
1494 dec_pending(tio->io, r);
1496 } else if (r != DM_MAPIO_SUBMITTED) {
1497 DMWARN("unimplemented target map return value: %d", r);
1503 struct mapped_device *md;
1504 struct dm_table *map;
1508 unsigned sector_count;
1511 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1513 bio->bi_iter.bi_sector = sector;
1514 bio->bi_iter.bi_size = to_bytes(len);
1518 * Creates a bio that consists of a range of complete bvecs.
1520 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1521 sector_t sector, unsigned len)
1523 struct bio *clone = &tio->clone;
1525 __bio_clone_fast(clone, bio);
1527 if (bio_integrity(bio))
1528 bio_integrity_clone(clone, bio, GFP_NOIO);
1530 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1531 clone->bi_iter.bi_size = to_bytes(len);
1533 if (bio_integrity(bio))
1534 bio_integrity_trim(clone, 0, len);
1537 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1538 struct dm_target *ti,
1539 unsigned target_bio_nr)
1541 struct dm_target_io *tio;
1544 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1545 tio = container_of(clone, struct dm_target_io, clone);
1549 tio->target_bio_nr = target_bio_nr;
1554 static void __clone_and_map_simple_bio(struct clone_info *ci,
1555 struct dm_target *ti,
1556 unsigned target_bio_nr, unsigned *len)
1558 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1559 struct bio *clone = &tio->clone;
1563 __bio_clone_fast(clone, ci->bio);
1565 bio_setup_sector(clone, ci->sector, *len);
1570 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1571 unsigned num_bios, unsigned *len)
1573 unsigned target_bio_nr;
1575 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1576 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1579 static int __send_empty_flush(struct clone_info *ci)
1581 unsigned target_nr = 0;
1582 struct dm_target *ti;
1584 BUG_ON(bio_has_data(ci->bio));
1585 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1586 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1591 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1592 sector_t sector, unsigned *len)
1594 struct bio *bio = ci->bio;
1595 struct dm_target_io *tio;
1596 unsigned target_bio_nr;
1597 unsigned num_target_bios = 1;
1600 * Does the target want to receive duplicate copies of the bio?
1602 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1603 num_target_bios = ti->num_write_bios(ti, bio);
1605 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1606 tio = alloc_tio(ci, ti, target_bio_nr);
1608 clone_bio(tio, bio, sector, *len);
1613 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1615 static unsigned get_num_discard_bios(struct dm_target *ti)
1617 return ti->num_discard_bios;
1620 static unsigned get_num_write_same_bios(struct dm_target *ti)
1622 return ti->num_write_same_bios;
1625 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1627 static bool is_split_required_for_discard(struct dm_target *ti)
1629 return ti->split_discard_bios;
1632 static int __send_changing_extent_only(struct clone_info *ci,
1633 get_num_bios_fn get_num_bios,
1634 is_split_required_fn is_split_required)
1636 struct dm_target *ti;
1641 ti = dm_table_find_target(ci->map, ci->sector);
1642 if (!dm_target_is_valid(ti))
1646 * Even though the device advertised support for this type of
1647 * request, that does not mean every target supports it, and
1648 * reconfiguration might also have changed that since the
1649 * check was performed.
1651 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1655 if (is_split_required && !is_split_required(ti))
1656 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1658 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1660 __send_duplicate_bios(ci, ti, num_bios, &len);
1663 } while (ci->sector_count -= len);
1668 static int __send_discard(struct clone_info *ci)
1670 return __send_changing_extent_only(ci, get_num_discard_bios,
1671 is_split_required_for_discard);
1674 static int __send_write_same(struct clone_info *ci)
1676 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1680 * Select the correct strategy for processing a non-flush bio.
1682 static int __split_and_process_non_flush(struct clone_info *ci)
1684 struct bio *bio = ci->bio;
1685 struct dm_target *ti;
1688 if (unlikely(bio->bi_rw & REQ_DISCARD))
1689 return __send_discard(ci);
1690 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1691 return __send_write_same(ci);
1693 ti = dm_table_find_target(ci->map, ci->sector);
1694 if (!dm_target_is_valid(ti))
1697 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1699 __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1702 ci->sector_count -= len;
1708 * Entry point to split a bio into clones and submit them to the targets.
1710 static void __split_and_process_bio(struct mapped_device *md,
1711 struct dm_table *map, struct bio *bio)
1713 struct clone_info ci;
1716 if (unlikely(!map)) {
1723 ci.io = alloc_io(md);
1725 atomic_set(&ci.io->io_count, 1);
1728 spin_lock_init(&ci.io->endio_lock);
1729 ci.sector = bio->bi_iter.bi_sector;
1731 start_io_acct(ci.io);
1733 if (bio->bi_rw & REQ_FLUSH) {
1734 ci.bio = &ci.md->flush_bio;
1735 ci.sector_count = 0;
1736 error = __send_empty_flush(&ci);
1737 /* dec_pending submits any data associated with flush */
1740 ci.sector_count = bio_sectors(bio);
1741 while (ci.sector_count && !error)
1742 error = __split_and_process_non_flush(&ci);
1745 /* drop the extra reference count */
1746 dec_pending(ci.io, error);
1748 /*-----------------------------------------------------------------
1749 * CRUD END
1750 *---------------------------------------------------------------*/
1753 * The request function that just remaps the bio built up by
1754 * generic_make_request.
1756 static void dm_make_request(struct request_queue *q, struct bio *bio)
1758 int rw = bio_data_dir(bio);
1759 struct mapped_device *md = q->queuedata;
1761 struct dm_table *map;
1763 map = dm_get_live_table(md, &srcu_idx);
1765 blk_queue_split(q, &bio, q->bio_split);
1767 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1769 /* if we're suspended, we have to queue this io for later */
1770 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1771 dm_put_live_table(md, srcu_idx);
1773 if (bio_rw(bio) != READA)
1780 __split_and_process_bio(md, map, bio);
1781 dm_put_live_table(md, srcu_idx);
1785 int dm_request_based(struct mapped_device *md)
1787 return blk_queue_stackable(md->queue);
1790 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
1794 if (blk_queue_io_stat(clone->q))
1795 clone->cmd_flags |= REQ_IO_STAT;
1797 clone->start_time = jiffies;
1798 r = blk_insert_cloned_request(clone->q, clone);
1800 /* must complete clone in terms of original request */
1801 dm_complete_request(rq, r);
1804 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1807 struct dm_rq_target_io *tio = data;
1808 struct dm_rq_clone_bio_info *info =
1809 container_of(bio, struct dm_rq_clone_bio_info, clone);
1811 info->orig = bio_orig;
1813 bio->bi_end_io = end_clone_bio;
1818 static int setup_clone(struct request *clone, struct request *rq,
1819 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1823 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
1824 dm_rq_bio_constructor, tio);
1828 clone->cmd = rq->cmd;
1829 clone->cmd_len = rq->cmd_len;
1830 clone->sense = rq->sense;
1831 clone->end_io = end_clone_request;
1832 clone->end_io_data = tio;
1839 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1840 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1843 * Do not allocate a clone if tio->clone was already set
1844 * (see: dm_mq_queue_rq).
1846 bool alloc_clone = !tio->clone;
1847 struct request *clone;
1850 clone = alloc_clone_request(md, gfp_mask);
1856 blk_rq_init(NULL, clone);
1857 if (setup_clone(clone, rq, tio, gfp_mask)) {
1860 free_clone_request(md, clone);
1867 static void map_tio_request(struct kthread_work *work);
1869 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
1870 struct mapped_device *md)
1877 memset(&tio->info, 0, sizeof(tio->info));
1878 if (md->kworker_task)
1879 init_kthread_work(&tio->work, map_tio_request);
1882 static struct dm_rq_target_io *prep_tio(struct request *rq,
1883 struct mapped_device *md, gfp_t gfp_mask)
1885 struct dm_rq_target_io *tio;
1887 struct dm_table *table;
1889 tio = alloc_rq_tio(md, gfp_mask);
1893 init_tio(tio, rq, md);
1895 table = dm_get_live_table(md, &srcu_idx);
1896 if (!dm_table_mq_request_based(table)) {
1897 if (!clone_rq(rq, md, tio, gfp_mask)) {
1898 dm_put_live_table(md, srcu_idx);
1903 dm_put_live_table(md, srcu_idx);
1909 * Called with the queue lock held.
1911 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1913 struct mapped_device *md = q->queuedata;
1914 struct dm_rq_target_io *tio;
1916 if (unlikely(rq->special)) {
1917 DMWARN("Already has something in rq->special.");
1918 return BLKPREP_KILL;
1921 tio = prep_tio(rq, md, GFP_ATOMIC);
1923 return BLKPREP_DEFER;
1926 rq->cmd_flags |= REQ_DONTPREP;
1933 * 0 : the request has been processed
1934 * DM_MAPIO_REQUEUE : the original request needs to be requeued
1935 * < 0 : the request was completed due to failure
1937 static int map_request(struct dm_rq_target_io *tio, struct request *rq,
1938 struct mapped_device *md)
1941 struct dm_target *ti = tio->ti;
1942 struct request *clone = NULL;
1946 r = ti->type->map_rq(ti, clone, &tio->info);
1948 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
1950 /* The target wants to complete the I/O */
1951 dm_kill_unmapped_request(rq, r);
1954 if (r != DM_MAPIO_REMAPPED)
1956 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
1958 ti->type->release_clone_rq(clone);
1959 return DM_MAPIO_REQUEUE;
1964 case DM_MAPIO_SUBMITTED:
1965 /* The target has taken the I/O to submit by itself later */
1967 case DM_MAPIO_REMAPPED:
1968 /* The target has remapped the I/O so dispatch it */
1969 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1971 dm_dispatch_clone_request(clone, rq);
1973 case DM_MAPIO_REQUEUE:
1974 /* The target wants to requeue the I/O */
1975 dm_requeue_original_request(md, tio->orig);
1979 DMWARN("unimplemented target map return value: %d", r);
1983 /* The target wants to complete the I/O */
1984 dm_kill_unmapped_request(rq, r);
1991 static void map_tio_request(struct kthread_work *work)
1993 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
1994 struct request *rq = tio->orig;
1995 struct mapped_device *md = tio->md;
1997 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
1998 dm_requeue_original_request(md, rq);
2001 static void dm_start_request(struct mapped_device *md, struct request *orig)
2003 if (!orig->q->mq_ops)
2004 blk_start_request(orig);
2006 blk_mq_start_request(orig);
2007 atomic_inc(&md->pending[rq_data_dir(orig)]);
2009 if (md->seq_rq_merge_deadline_usecs) {
2010 md->last_rq_pos = rq_end_sector(orig);
2011 md->last_rq_rw = rq_data_dir(orig);
2012 md->last_rq_start_time = ktime_get();
2015 if (unlikely(dm_stats_used(&md->stats))) {
2016 struct dm_rq_target_io *tio = tio_from_request(orig);
2017 tio->duration_jiffies = jiffies;
2018 tio->n_sectors = blk_rq_sectors(orig);
2019 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
2020 tio->n_sectors, false, 0, &tio->stats_aux);
2024 * Hold the md reference here for the in-flight I/O.
2025 * We can't rely on the reference count taken by the device opener,
2026 * because the device may be closed during the request completion
2027 * when all bios are completed.
2028 * See the comment in rq_completed() too.
2033 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
2035 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
2037 return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
2040 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
2041 const char *buf, size_t count)
2045 if (!dm_request_based(md) || md->use_blk_mq)
2048 if (kstrtouint(buf, 10, &deadline))
2051 if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
2052 deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
2054 md->seq_rq_merge_deadline_usecs = deadline;
2059 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
2061 ktime_t kt_deadline;
2063 if (!md->seq_rq_merge_deadline_usecs)
2066 kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
2067 kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
2069 return !ktime_after(ktime_get(), kt_deadline);
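/*
 * Tuning example (an assumption about the sysfs path, based on the
 * attribute handlers above): the merge deadline is writable per device,
 * with 0 disabling the heuristic and values capped at
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS:
 *
 *	echo 100 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 */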
2073 * q->request_fn for request-based dm.
2074 * Called with the queue lock held.
2076 static void dm_request_fn(struct request_queue *q)
2078 struct mapped_device *md = q->queuedata;
2080 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2081 struct dm_target *ti;
2083 struct dm_rq_target_io *tio;
2087 * For suspend, check blk_queue_stopped() and increment
2088 * ->pending within a single queue_lock so as not to increment the
2089 * number of in-flight I/Os after the queue is stopped in
2090 * dm_suspend().
2092 while (!blk_queue_stopped(q)) {
2093 rq = blk_peek_request(q);
2097 /* always use block 0 to find the target for flushes for now */
2099 if (!(rq->cmd_flags & REQ_FLUSH))
2100 pos = blk_rq_pos(rq);
2102 ti = dm_table_find_target(map, pos);
2103 if (!dm_target_is_valid(ti)) {
2105 * Must perform the setup that rq_completed() requires
2106 * before calling dm_kill_unmapped_request().
2108 DMERR_LIMIT("request attempted access beyond the end of device");
2109 dm_start_request(md, rq);
2110 dm_kill_unmapped_request(rq, -EIO);
2114 if (dm_request_peeked_before_merge_deadline(md) &&
2115 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
2116 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
2119 if (ti->type->busy && ti->type->busy(ti))
2122 dm_start_request(md, rq);
2124 tio = tio_from_request(rq);
2125 /* Establish tio->ti before queuing work (map_tio_request) */
2127 queue_kthread_work(&md->kworker, &tio->work);
2128 BUG_ON(!irqs_disabled());
2134 blk_delay_queue(q, HZ / 100);
2136 dm_put_live_table(md, srcu_idx);
2139 static int dm_any_congested(void *congested_data, int bdi_bits)
2142 struct mapped_device *md = congested_data;
2143 struct dm_table *map;
2145 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2146 map = dm_get_live_table_fast(md);
2149 * Request-based dm cares only about its own queue when queried
2150 * about the congestion status of the request_queue.
2152 if (dm_request_based(md))
2153 r = md->queue->backing_dev_info.wb.state &
2156 r = dm_table_any_congested(map, bdi_bits);
2158 dm_put_live_table_fast(md);
2164 /*-----------------------------------------------------------------
2165 * An IDR is used to keep track of allocated minor numbers.
2166 *---------------------------------------------------------------*/
2167 static void free_minor(int minor)
2169 spin_lock(&_minor_lock);
2170 idr_remove(&_minor_idr, minor);
2171 spin_unlock(&_minor_lock);
2175 * See if the device with a specific minor # is free.
2177 static int specific_minor(int minor)
2181 if (minor >= (1 << MINORBITS))
2184 idr_preload(GFP_KERNEL);
2185 spin_lock(&_minor_lock);
2187 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
2189 spin_unlock(&_minor_lock);
2192 return r == -ENOSPC ? -EBUSY : r;
2196 static int next_free_minor(int *minor)
2200 idr_preload(GFP_KERNEL);
2201 spin_lock(&_minor_lock);
2203 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2205 spin_unlock(&_minor_lock);
2213 static const struct block_device_operations dm_blk_dops;
2215 static void dm_wq_work(struct work_struct *work);
2217 static void dm_init_md_queue(struct mapped_device *md)
2220 * Request-based dm devices cannot be stacked on top of bio-based dm
2221 * devices. The type of this dm device may not have been decided yet.
2222 * The type is decided at the first table loading time.
2223 * To prevent problematic device stacking, clear the queue flag
2224 * for request stacking support until then.
2226 * This queue is new, so no concurrency on the queue_flags.
2228 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2231 * Initialize data that will only be used by a non-blk-mq DM queue
2232 * - must do so here (in alloc_dev callchain) before queue is used
2234 md->queue->queuedata = md;
2235 md->queue->backing_dev_info.congested_data = md;
2238 static void dm_init_old_md_queue(struct mapped_device *md)
2240 md->use_blk_mq = false;
2241 dm_init_md_queue(md);
2244 * Initialize aspects of queue that aren't relevant for blk-mq
2246 md->queue->backing_dev_info.congested_fn = dm_any_congested;
2247 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
2250 static void cleanup_mapped_device(struct mapped_device *md)
2253 destroy_workqueue(md->wq);
2254 if (md->kworker_task)
2255 kthread_stop(md->kworker_task);
2257 mempool_destroy(md->io_pool);
2259 mempool_destroy(md->rq_pool);
2261 bioset_free(md->bs);
2263 cleanup_srcu_struct(&md->io_barrier);
2266 spin_lock(&_minor_lock);
2267 md->disk->private_data = NULL;
2268 spin_unlock(&_minor_lock);
2269 if (blk_get_integrity(md->disk))
2270 blk_integrity_unregister(md->disk);
2271 del_gendisk(md->disk);
2276 blk_cleanup_queue(md->queue);
2285 * Allocate and initialise a blank device with a given minor.
2287 static struct mapped_device *alloc_dev(int minor)
2290 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
2294 DMWARN("unable to allocate device, out of memory.");
2298 if (!try_module_get(THIS_MODULE))
2299 goto bad_module_get;
2301 /* get a minor number for the dev */
2302 if (minor == DM_ANY_MINOR)
2303 r = next_free_minor(&minor);
2305 r = specific_minor(minor);
2309 r = init_srcu_struct(&md->io_barrier);
2311 goto bad_io_barrier;
2313 md->use_blk_mq = use_blk_mq;
2314 md->type = DM_TYPE_NONE;
2315 mutex_init(&md->suspend_lock);
2316 mutex_init(&md->type_lock);
2317 mutex_init(&md->table_devices_lock);
2318 spin_lock_init(&md->deferred_lock);
2319 atomic_set(&md->holders, 1);
2320 atomic_set(&md->open_count, 0);
2321 atomic_set(&md->event_nr, 0);
2322 atomic_set(&md->uevent_seq, 0);
2323 INIT_LIST_HEAD(&md->uevent_list);
2324 INIT_LIST_HEAD(&md->table_devices);
2325 spin_lock_init(&md->uevent_lock);
2327 md->queue = blk_alloc_queue(GFP_KERNEL);
2331 dm_init_md_queue(md);
2333 md->disk = alloc_disk(1);
2337 atomic_set(&md->pending[0], 0);
2338 atomic_set(&md->pending[1], 0);
2339 init_waitqueue_head(&md->wait);
2340 INIT_WORK(&md->work, dm_wq_work);
2341 init_waitqueue_head(&md->eventq);
2342 init_completion(&md->kobj_holder.completion);
2343 md->kworker_task = NULL;
2345 md->disk->major = _major;
2346 md->disk->first_minor = minor;
2347 md->disk->fops = &dm_blk_dops;
2348 md->disk->queue = md->queue;
2349 md->disk->private_data = md;
2350 sprintf(md->disk->disk_name, "dm-%d", minor);
2352 format_dev_t(md->name, MKDEV(_major, minor));
2354 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2358 md->bdev = bdget_disk(md->disk, 0);
2362 bio_init(&md->flush_bio);
2363 md->flush_bio.bi_bdev = md->bdev;
2364 md->flush_bio.bi_rw = WRITE_FLUSH;
2366 dm_stats_init(&md->stats);
2368 /* Populate the mapping, nobody knows we exist yet */
2369 spin_lock(&_minor_lock);
2370 old_md = idr_replace(&_minor_idr, md, minor);
2371 spin_unlock(&_minor_lock);
2373 BUG_ON(old_md != MINOR_ALLOCED);
2378 cleanup_mapped_device(md);
2382 module_put(THIS_MODULE);
2388 static void unlock_fs(struct mapped_device *md);
2390 static void free_dev(struct mapped_device *md)
2392 int minor = MINOR(disk_devt(md->disk));
2396 cleanup_mapped_device(md);
2398 blk_mq_free_tag_set(&md->tag_set);
2400 free_table_devices(&md->table_devices);
2401 dm_stats_cleanup(&md->stats);
2404 module_put(THIS_MODULE);
2408 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2410 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2413 /* The md already has the necessary mempools. */
2414 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2416 * Reload bioset because front_pad may have changed
2417 * because a different table was loaded.
2419 bioset_free(md->bs);
2424 * There's no need to reload with request-based dm
2425 * because the size of front_pad doesn't change.
2426 * Note for the future: if you are going to reload the bioset,
2427 * prep-ed requests in the queue may refer
2428 * to bios from the old bioset, so you must walk
2429 * through the queue to unprep them.
2434 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
2436 md->io_pool = p->io_pool;
2438 md->rq_pool = p->rq_pool;
2444 /* mempool bind completed, no longer need any mempools in the table */
2445 dm_table_free_md_mempools(t);
2449 * Bind a table to the device.
2451 static void event_callback(void *context)
2453 unsigned long flags;
2455 struct mapped_device *md = (struct mapped_device *) context;
2457 spin_lock_irqsave(&md->uevent_lock, flags);
2458 list_splice_init(&md->uevent_list, &uevents);
2459 spin_unlock_irqrestore(&md->uevent_lock, flags);
2461 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2463 atomic_inc(&md->event_nr);
2464 wake_up(&md->eventq);
2468 * Protected by md->suspend_lock obtained by dm_swap_table().
2470 static void __set_size(struct mapped_device *md, sector_t size)
2472 set_capacity(md->disk, size);
2474 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2478 * Returns old map, which caller must destroy.
2480 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2481 struct queue_limits *limits)
2483 struct dm_table *old_map;
2484 struct request_queue *q = md->queue;
2487 size = dm_table_get_size(t);
2490 * Wipe any geometry if the size of the table changed.
2492 if (size != dm_get_size(md))
2493 memset(&md->geometry, 0, sizeof(md->geometry));
2495 __set_size(md, size);
2497 dm_table_event_callback(t, event_callback, md);
2500 * If the old table type wasn't request-based, the queue hasn't been
2501 * stopped during suspension, so stop it now to prevent
2502 * I/O mapping before resume.
2503 * This must be done before setting the queue restrictions,
2504 * because request-based dm may run just after they are set.
2506 if (dm_table_request_based(t))
2509 __bind_mempools(md, t);
2511 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2512 rcu_assign_pointer(md->map, t);
2513 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2515 dm_table_set_restrictions(t, q, limits);
2523 * Returns unbound table for the caller to free.
2525 static struct dm_table *__unbind(struct mapped_device *md)
2527 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2532 dm_table_event_callback(map, NULL, NULL);
2533 RCU_INIT_POINTER(md->map, NULL);
2540 * Constructor for a new device.
2542 int dm_create(int minor, struct mapped_device **result)
2544 struct mapped_device *md;
2546 md = alloc_dev(minor);
2557 * Functions to manage md->type.
2558 * All are required to hold md->type_lock.
2560 void dm_lock_md_type(struct mapped_device *md)
2562 mutex_lock(&md->type_lock);
2565 void dm_unlock_md_type(struct mapped_device *md)
2567 mutex_unlock(&md->type_lock);
2570 void dm_set_md_type(struct mapped_device *md, unsigned type)
2572 BUG_ON(!mutex_is_locked(&md->type_lock));
2576 unsigned dm_get_md_type(struct mapped_device *md)
2578 BUG_ON(!mutex_is_locked(&md->type_lock));
2582 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2584 return md->immutable_target_type;
2588 * The queue_limits are only valid as long as you have a reference
2589 * count on md.
2591 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2593 BUG_ON(!atomic_read(&md->holders));
2594 return &md->queue->limits;
2596 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
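/*
 * Usage sketch (illustrative): callers such as disable_write_same() above
 * fetch the live limits and adjust them in place while holding a reference
 * on the md:
 *
 *	struct queue_limits *limits = dm_get_queue_limits(md);
 *
 *	limits->max_write_same_sectors = 0;
 */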
2598 static void init_rq_based_worker_thread(struct mapped_device *md)
2600 /* Initialize the request-based DM worker thread */
2601 init_kthread_worker(&md->kworker);
2602 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2603 "kdmwork-%s", dm_device_name(md));
2607 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2609 static int dm_init_request_based_queue(struct mapped_device *md)
2611 struct request_queue *q = NULL;
2613 /* Fully initialize the queue */
2614 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2618 /* disable dm_request_fn's merge heuristic by default */
2619 md->seq_rq_merge_deadline_usecs = 0;
2622 dm_init_old_md_queue(md);
2623 blk_queue_softirq_done(md->queue, dm_softirq_done);
2624 blk_queue_prep_rq(md->queue, dm_prep_fn);
2626 init_rq_based_worker_thread(md);
2628 elv_register_queue(md->queue);
2633 static int dm_mq_init_request(void *data, struct request *rq,
2634 unsigned int hctx_idx, unsigned int request_idx,
2635 unsigned int numa_node)
2637 struct mapped_device *md = data;
2638 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2641 * Must initialize md member of tio, otherwise it won't
2642 * be available in dm_mq_queue_rq.
2649 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
2650 const struct blk_mq_queue_data *bd)
2652 struct request *rq = bd->rq;
2653 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2654 struct mapped_device *md = tio->md;
2655 int srcu_idx;
2656 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2657 struct dm_target *ti;
2658 sector_t pos;
2660 /* always use block 0 to find the target for flushes for now */
2661 pos = 0;
2662 if (!(rq->cmd_flags & REQ_FLUSH))
2663 pos = blk_rq_pos(rq);
2665 ti = dm_table_find_target(map, pos);
2666 if (!dm_target_is_valid(ti)) {
2667 dm_put_live_table(md, srcu_idx);
2668 DMERR_LIMIT("request attempted access beyond the end of device");
2670 * Must perform the setup that rq_completed() requires
2671 * before returning BLK_MQ_RQ_QUEUE_ERROR.
2673 dm_start_request(md, rq);
2674 return BLK_MQ_RQ_QUEUE_ERROR;
2676 dm_put_live_table(md, srcu_idx);
2678 if (ti->type->busy && ti->type->busy(ti))
2679 return BLK_MQ_RQ_QUEUE_BUSY;
2681 dm_start_request(md, rq);
2683 /* Init tio using md established in .init_request */
2684 init_tio(tio, rq, md);
2687 * Establish tio->ti before queuing work (map_tio_request)
2688 * or making a direct call to map_request().
2689 */
2690 tio->ti = ti;
2692 /* Clone the request if underlying devices aren't blk-mq */
2693 if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
2694 /* clone request is allocated at the end of the pdu */
2695 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
2696 (void) clone_rq(rq, md, tio, GFP_ATOMIC);
2697 queue_kthread_work(&md->kworker, &tio->work);
2699 /* Direct call is fine since .queue_rq allows allocations */
2700 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
2701 /* Undo dm_start_request() before requeuing */
2702 rq_end_stats(md, rq);
2703 rq_completed(md, rq_data_dir(rq), false);
2704 return BLK_MQ_RQ_QUEUE_BUSY;
2708 return BLK_MQ_RQ_QUEUE_OK;
2711 static struct blk_mq_ops dm_mq_ops = {
2712 .queue_rq = dm_mq_queue_rq,
2713 .map_queue = blk_mq_map_queue,
2714 .complete = dm_softirq_done,
2715 .init_request = dm_mq_init_request,
2718 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
2720 unsigned md_type = dm_get_md_type(md);
2721 struct request_queue *q;
2722 int err;
2724 memset(&md->tag_set, 0, sizeof(md->tag_set));
2725 md->tag_set.ops = &dm_mq_ops;
2726 md->tag_set.queue_depth = BLKDEV_MAX_RQ;
2727 md->tag_set.numa_node = NUMA_NO_NODE;
2728 md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2729 md->tag_set.nr_hw_queues = 1;
2730 if (md_type == DM_TYPE_REQUEST_BASED) {
2731 /* make the memory for non-blk-mq clone part of the pdu */
2732 md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
2733 } else
2734 md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
2735 md->tag_set.driver_data = md;
2737 err = blk_mq_alloc_tag_set(&md->tag_set);
2738 if (err)
2739 return err;
2741 q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
2742 if (IS_ERR(q)) {
2743 err = PTR_ERR(q);
2744 goto out_tag_set;
2745 }
2747 dm_init_md_queue(md);
2749 /* backfill 'mq' sysfs registration normally done in blk_register_queue */
2750 blk_mq_register_disk(md->disk);
2752 if (md_type == DM_TYPE_REQUEST_BASED)
2753 init_rq_based_worker_thread(md);
2755 return 0;
2757 out_tag_set:
2758 blk_mq_free_tag_set(&md->tag_set);
2759 return err;
2762 static unsigned filter_md_type(unsigned type, struct mapped_device *md)
2764 if (type == DM_TYPE_BIO_BASED)
2765 return type;
2767 return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
2771 * Set up the DM device's queue based on md's type.
2773 int dm_setup_md_queue(struct mapped_device *md)
2775 int r;
2776 unsigned md_type = filter_md_type(dm_get_md_type(md), md);
2778 switch (md_type) {
2779 case DM_TYPE_REQUEST_BASED:
2780 r = dm_init_request_based_queue(md);
2781 if (r) {
2782 DMWARN("Cannot initialize queue for request-based mapped device");
2783 return r;
2784 }
2785 break;
2786 case DM_TYPE_MQ_REQUEST_BASED:
2787 r = dm_init_request_based_blk_mq_queue(md);
2788 if (r) {
2789 DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
2790 return r;
2791 }
2792 break;
2793 case DM_TYPE_BIO_BASED:
2794 dm_init_old_md_queue(md);
2795 blk_queue_make_request(md->queue, dm_make_request);
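/*
 * Illustrative usage sketch (not dm core code): the order an
 * ioctl-style caller follows before the first resume -- fix md->type
 * under type_lock (see the sketch near dm_set_md_type above), then
 * build the matching queue exactly once.
 */
static int example_finish_setup(struct mapped_device *md, unsigned type)
{
	example_set_type_once(md, type);	/* from the earlier sketch */
	return dm_setup_md_queue(md);	/* bio-based, rq-based or blk-mq */
}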
2802 struct mapped_device *dm_get_md(dev_t dev)
2804 struct mapped_device *md;
2805 unsigned minor = MINOR(dev);
2807 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2810 spin_lock(&_minor_lock);
2812 md = idr_find(&_minor_idr, minor);
2813 if (md) {
2814 if ((md == MINOR_ALLOCED ||
2815 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2816 dm_deleting_md(md) ||
2817 test_bit(DMF_FREEING, &md->flags))) {
2818 md = NULL;
2819 goto out;
2820 }
2821 dm_get(md);
2822 }
2824 out:
2825 spin_unlock(&_minor_lock);
2827 return md;
2829 EXPORT_SYMBOL_GPL(dm_get_md);
2831 void *dm_get_mdptr(struct mapped_device *md)
2833 return md->interface_ptr;
2836 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2838 md->interface_ptr = ptr;
2841 void dm_get(struct mapped_device *md)
2843 atomic_inc(&md->holders);
2844 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2847 int dm_hold(struct mapped_device *md)
2849 spin_lock(&_minor_lock);
2850 if (test_bit(DMF_FREEING, &md->flags)) {
2851 spin_unlock(&_minor_lock);
2852 return -EBUSY;
2853 }
2854 dm_get(md);
2855 spin_unlock(&_minor_lock);
2856 return 0;
2858 EXPORT_SYMBOL_GPL(dm_hold);
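/*
 * Illustrative reference-counting sketch (not dm core code): dm_get_md()
 * takes a reference and returns NULL for devices being freed or deleted;
 * every successful lookup must be paired with dm_put().
 */
static int example_with_device(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	DMINFO("found %s", dm_device_name(md));
	dm_put(md);	/* release; the device may now be freed */
	return 0;
}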
2860 const char *dm_device_name(struct mapped_device *md)
2862 return md->name;
2864 EXPORT_SYMBOL_GPL(dm_device_name);
2866 static void __dm_destroy(struct mapped_device *md, bool wait)
2868 struct dm_table *map;
2869 int srcu_idx;
2871 might_sleep();
2873 map = dm_get_live_table(md, &srcu_idx);
2875 spin_lock(&_minor_lock);
2876 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2877 set_bit(DMF_FREEING, &md->flags);
2878 spin_unlock(&_minor_lock);
2880 if (dm_request_based(md) && md->kworker_task)
2881 flush_kthread_worker(&md->kworker);
2884 * Take suspend_lock so that presuspend and postsuspend methods
2885 * do not race with internal suspend.
2887 mutex_lock(&md->suspend_lock);
2888 if (!dm_suspended_md(md)) {
2889 dm_table_presuspend_targets(map);
2890 dm_table_postsuspend_targets(map);
2892 mutex_unlock(&md->suspend_lock);
2894 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2895 dm_put_live_table(md, srcu_idx);
2898 * Rare, but there may still be I/O requests completing,
2899 * for example. Wait for all references to disappear.
2900 * No one should increment the reference count of the mapped_device
2901 * once the mapped_device state becomes DMF_FREEING.
2903 if (wait)
2904 while (atomic_read(&md->holders))
2905 msleep(1);
2906 else if (atomic_read(&md->holders))
2907 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2908 dm_device_name(md), atomic_read(&md->holders));
2910 dm_sysfs_exit(md);
2911 dm_table_destroy(__unbind(md));
2912 free_dev(md);
2915 void dm_destroy(struct mapped_device *md)
2917 __dm_destroy(md, true);
2920 void dm_destroy_immediate(struct mapped_device *md)
2922 __dm_destroy(md, false);
2925 void dm_put(struct mapped_device *md)
2927 atomic_dec(&md->holders);
2929 EXPORT_SYMBOL_GPL(dm_put);
2931 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2933 int r = 0;
2934 DECLARE_WAITQUEUE(wait, current);
2936 add_wait_queue(&md->wait, &wait);
2938 while (1) {
2939 set_current_state(interruptible);
2941 if (!md_in_flight(md))
2942 break;
2944 if (interruptible == TASK_INTERRUPTIBLE &&
2945 signal_pending(current)) {
2946 r = -EINTR;
2947 break;
2948 }
2950 io_schedule();
2951 }
2952 set_current_state(TASK_RUNNING);
2954 remove_wait_queue(&md->wait, &wait);
2956 return r;
2960 * Process the deferred bios
2962 static void dm_wq_work(struct work_struct *work)
2964 struct mapped_device *md = container_of(work, struct mapped_device,
2965 work);
2966 struct bio *c;
2967 int srcu_idx;
2968 struct dm_table *map;
2970 map = dm_get_live_table(md, &srcu_idx);
2972 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2973 spin_lock_irq(&md->deferred_lock);
2974 c = bio_list_pop(&md->deferred);
2975 spin_unlock_irq(&md->deferred_lock);
2977 if (!c)
2978 break;
2980 if (dm_request_based(md))
2981 generic_make_request(c);
2982 else
2983 __split_and_process_bio(md, map, c);
2986 dm_put_live_table(md, srcu_idx);
2989 static void dm_queue_flush(struct mapped_device *md)
2991 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2992 smp_mb__after_atomic();
2993 queue_work(md->wq, &md->work);
2997 * Swap in a new table, returning the old one for the caller to destroy.
2999 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
3001 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
3002 struct queue_limits limits;
3003 int r;
3005 mutex_lock(&md->suspend_lock);
3007 /* device must be suspended */
3008 if (!dm_suspended_md(md))
3009 goto out;
3012 * If the new table has no data devices, retain the existing limits.
3013 * This helps multipath with queue_if_no_path if all paths disappear,
3014 * then new I/O is queued based on these limits, and then some paths
3015 * reappear.
3017 if (dm_table_has_no_data_devices(table)) {
3018 live_map = dm_get_live_table_fast(md);
3019 if (live_map)
3020 limits = md->queue->limits;
3021 dm_put_live_table_fast(md);
3022 }
3024 if (!live_map) {
3025 r = dm_calculate_queue_limits(table, &limits);
3026 if (r) {
3027 map = ERR_PTR(r);
3028 goto out;
3029 }
3030 }
3032 map = __bind(md, table, &limits);
3034 out:
3035 mutex_unlock(&md->suspend_lock);
3036 return map;
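/*
 * Illustrative usage sketch (not dm core code): the table returned by
 * dm_swap_table() is owned by the caller and must be destroyed; an
 * ERR_PTR return means the swap did not happen.
 */
static int example_swap_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map = dm_swap_table(md, t);

	if (IS_ERR(old_map))
		return PTR_ERR(old_map);	/* e.g. device not suspended */
	if (old_map)
		dm_table_destroy(old_map);
	return 0;
}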
3040 * Functions to lock and unlock any filesystem running on the
3041 * device.
3043 static int lock_fs(struct mapped_device *md)
3045 int r;
3047 WARN_ON(md->frozen_sb);
3049 md->frozen_sb = freeze_bdev(md->bdev);
3050 if (IS_ERR(md->frozen_sb)) {
3051 r = PTR_ERR(md->frozen_sb);
3052 md->frozen_sb = NULL;
3053 return r;
3054 }
3056 set_bit(DMF_FROZEN, &md->flags);
3058 return 0;
3061 static void unlock_fs(struct mapped_device *md)
3063 if (!test_bit(DMF_FROZEN, &md->flags))
3064 return;
3066 thaw_bdev(md->bdev, md->frozen_sb);
3067 md->frozen_sb = NULL;
3068 clear_bit(DMF_FROZEN, &md->flags);
3072 * If __dm_suspend returns 0, the device is completely quiescent
3073 * now. There is no request-processing activity. All new requests
3074 * are being added to the md->deferred list.
3076 * Caller must hold md->suspend_lock
3078 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3079 unsigned suspend_flags, int interruptible)
3081 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
3082 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
3083 int r;
3086 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
3087 * This flag is cleared before dm_suspend returns.
3089 if (noflush)
3090 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3093 * This gets reverted if there's an error later and the targets
3094 * provide the .presuspend_undo hook.
3096 dm_table_presuspend_targets(map);
3099 * Flush I/O to the device.
3100 * Any I/O submitted after lock_fs() may not be flushed.
3101 * noflush takes precedence over do_lockfs.
3102 * (lock_fs() flushes I/Os and waits for them to complete.)
3104 if (!noflush && do_lockfs) {
3105 r = lock_fs(md);
3106 if (r) {
3107 dm_table_presuspend_undo_targets(map);
3108 return r;
3109 }
3110 }
3113 * Here we must make sure that no processes are submitting requests
3114 * to target drivers i.e. no one may be executing
3115 * __split_and_process_bio. This is called from dm_request and
3116 * dm_wq_work.
3118 * To get all processes out of __split_and_process_bio in dm_request,
3119 * we take the write lock. To prevent any process from reentering
3120 * __split_and_process_bio from dm_request and to quiesce the thread
3121 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
3122 * flush_workqueue(md->wq).
3124 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3126 synchronize_srcu(&md->io_barrier);
3129 * Stop md->queue before flushing md->wq in case request-based
3130 * dm defers requests to md->wq from md->queue.
3132 if (dm_request_based(md)) {
3133 stop_queue(md->queue);
3134 if (md->kworker_task)
3135 flush_kthread_worker(&md->kworker);
3138 flush_workqueue(md->wq);
3141 * At this point no more requests are entering target request routines.
3142 * We call dm_wait_for_completion to wait for all existing requests
3143 * to finish.
3145 r = dm_wait_for_completion(md, interruptible);
3147 if (noflush)
3148 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3150 synchronize_srcu(&md->io_barrier);
3152 /* were we interrupted? */
3153 if (r < 0) {
3154 dm_queue_flush(md);
3156 if (dm_request_based(md))
3157 start_queue(md->queue);
3159 unlock_fs(md);
3160 dm_table_presuspend_undo_targets(map);
3161 /* pushback list is already flushed, so skip flush */
3162 }
3164 return r;
3168 * We need to be able to change a mapping table under a mounted
3169 * filesystem. For example we might want to move some data in
3170 * the background. Before the table can be swapped with
3171 * dm_bind_table, dm_suspend must be called to flush any in-flight
3172 * bios and ensure that any further I/O gets deferred.
3175 * Suspend mechanism in request-based dm.
3177 * 1. Flush all I/Os by lock_fs() if needed.
3178 * 2. Stop dispatching any I/O by stopping the request_queue.
3179 * 3. Wait for all in-flight I/Os to be completed or requeued.
3181 * To abort suspend, start the request_queue.
3183 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
3185 struct dm_table *map = NULL;
3186 int r = 0;
3188 retry:
3189 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3191 if (dm_suspended_md(md)) {
3192 r = -EINVAL;
3193 goto out_unlock;
3194 }
3196 if (dm_suspended_internally_md(md)) {
3197 /* already internally suspended, wait for internal resume */
3198 mutex_unlock(&md->suspend_lock);
3199 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3200 if (r)
3201 return r;
3202 goto retry;
3203 }
3205 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3207 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
3208 if (r)
3209 goto out_unlock;
3211 set_bit(DMF_SUSPENDED, &md->flags);
3213 dm_table_postsuspend_targets(map);
3215 out_unlock:
3216 mutex_unlock(&md->suspend_lock);
3217 return r;
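/*
 * Illustrative usage sketch (not dm core code): a full
 * suspend/swap/resume cycle as a caller would perform it. The lockfs
 * flag is an assumption about a typical caller; noflush would take
 * precedence over it if both were set.
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* flush and quiesce */
	if (r)
		return r;

	old = dm_swap_table(md, t);
	if (IS_ERR(old)) {
		dm_resume(md);
		return PTR_ERR(old);
	}
	if (old)
		dm_table_destroy(old);

	return dm_resume(md);	/* restart deferred and queued I/O */
}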
3220 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3222 if (map) {
3223 int r = dm_table_resume_targets(map);
3224 if (r)
3225 return r;
3226 }
3228 dm_queue_flush(md);
3231 * Flushing deferred I/Os must be done after targets are resumed
3232 * so that mapping of targets can work correctly.
3233 * Request-based dm is queueing the deferred I/Os in its request_queue.
3235 if (dm_request_based(md))
3236 start_queue(md->queue);
3238 return 0;
3243 int dm_resume(struct mapped_device *md)
3245 int r = -EINVAL;
3246 struct dm_table *map = NULL;
3248 retry:
3249 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3251 if (!dm_suspended_md(md))
3252 goto out;
3254 if (dm_suspended_internally_md(md)) {
3255 /* already internally suspended, wait for internal resume */
3256 mutex_unlock(&md->suspend_lock);
3257 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3258 if (r)
3259 return r;
3260 goto retry;
3261 }
3263 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3264 if (!map || !dm_table_get_size(map))
3265 goto out;
3267 r = __dm_resume(md, map);
3268 if (r)
3269 goto out;
3271 clear_bit(DMF_SUSPENDED, &md->flags);
3273 r = 0;
3274 out:
3275 mutex_unlock(&md->suspend_lock);
3277 return r;
3281 * Internal suspend/resume works like userspace-driven suspend. It waits
3282 * until all bios finish and prevents issuing new bios to the target drivers.
3283 * It may be used only from the kernel.
3286 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
3288 struct dm_table *map = NULL;
3290 if (md->internal_suspend_count++)
3291 return; /* nested internal suspend */
3293 if (dm_suspended_md(md)) {
3294 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3295 return; /* nest suspend */
3298 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3301 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3302 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
3303 * would require changing .presuspend to return an error -- avoid this
3304 * until there is a need for more elaborate variants of internal suspend.
3306 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
3308 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3310 dm_table_postsuspend_targets(map);
3313 static void __dm_internal_resume(struct mapped_device *md)
3315 BUG_ON(!md->internal_suspend_count);
3317 if (--md->internal_suspend_count)
3318 return; /* resume from nested internal suspend */
3320 if (dm_suspended_md(md))
3321 goto done; /* resume from nested suspend */
3324 * NOTE: existing callers don't need to call dm_table_resume_targets
3325 * (which may fail -- so best to avoid it for now by passing NULL map)
3327 (void) __dm_resume(md, NULL);
3329 done:
3330 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3331 smp_mb__after_atomic();
3332 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3335 void dm_internal_suspend_noflush(struct mapped_device *md)
3337 mutex_lock(&md->suspend_lock);
3338 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3339 mutex_unlock(&md->suspend_lock);
3341 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3343 void dm_internal_resume(struct mapped_device *md)
3345 mutex_lock(&md->suspend_lock);
3346 __dm_internal_resume(md);
3347 mutex_unlock(&md->suspend_lock);
3349 EXPORT_SYMBOL_GPL(dm_internal_resume);
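/*
 * Illustrative sketch (not dm core code): kernel-internal quiescing
 * around an operation that must not race with bios, e.g. a metadata
 * commit in a target. Internal suspends nest, so this is safe even if
 * the device is already suspended.
 */
static void example_internal_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);
	/* ... no bios reach the targets here ... */
	dm_internal_resume(md);
}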
3352 * Fast variants of internal suspend/resume hold md->suspend_lock,
3353 * which prevents interaction with userspace-driven suspend.
3356 void dm_internal_suspend_fast(struct mapped_device *md)
3358 mutex_lock(&md->suspend_lock);
3359 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3360 return;
3362 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3363 synchronize_srcu(&md->io_barrier);
3364 flush_workqueue(md->wq);
3365 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3367 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3369 void dm_internal_resume_fast(struct mapped_device *md)
3371 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3372 goto done;
3374 dm_queue_flush(md);
3376 done:
3377 mutex_unlock(&md->suspend_lock);
3379 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
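/*
 * Illustrative sketch (not dm core code): the fast variants keep
 * md->suspend_lock held between the two calls, so a userspace-driven
 * suspend cannot interleave with the critical section.
 */
static void example_fast_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* takes suspend_lock, waits for I/O */
	/* ... short critical section ... */
	dm_internal_resume_fast(md);	/* flushes deferred bios, drops lock */
}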
3381 /*-----------------------------------------------------------------
3382 * Event notification.
3383 *---------------------------------------------------------------*/
3384 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3385 unsigned cookie)
3387 char udev_cookie[DM_COOKIE_LENGTH];
3388 char *envp[] = { udev_cookie, NULL };
3390 if (!cookie)
3391 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
3393 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3394 DM_COOKIE_ENV_VAR_NAME, cookie);
3395 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
3396 action, envp);
3400 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3402 return atomic_add_return(1, &md->uevent_seq);
3405 uint32_t dm_get_event_nr(struct mapped_device *md)
3407 return atomic_read(&md->event_nr);
3410 int dm_wait_event(struct mapped_device *md, int event_nr)
3412 return wait_event_interruptible(md->eventq,
3413 (event_nr != atomic_read(&md->event_nr)));
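/*
 * Illustrative sketch (not dm core code): the sample-then-wait pattern
 * for the event counter -- read the counter, report state, then sleep
 * until the counter moves past the sampled value.
 */
static int example_wait_for_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	/* ... report current state based on event_nr ... */

	return dm_wait_event(md, event_nr);	/* 0, or -ERESTARTSYS on signal */
}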
3416 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3418 unsigned long flags;
3420 spin_lock_irqsave(&md->uevent_lock, flags);
3421 list_add(elist, &md->uevent_list);
3422 spin_unlock_irqrestore(&md->uevent_lock, flags);
3426 * The gendisk is only valid as long as you have a reference
3427 * count on 'md'.
3429 struct gendisk *dm_disk(struct mapped_device *md)
3431 return md->disk;
3433 EXPORT_SYMBOL_GPL(dm_disk);
3435 struct kobject *dm_kobject(struct mapped_device *md)
3437 return &md->kobj_holder.kobj;
3440 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3442 struct mapped_device *md;
3444 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3446 if (test_bit(DMF_FREEING, &md->flags) ||
3447 dm_deleting_md(md))
3448 return NULL;
3450 dm_get(md);
3451 return md;
3454 int dm_suspended_md(struct mapped_device *md)
3456 return test_bit(DMF_SUSPENDED, &md->flags);
3459 int dm_suspended_internally_md(struct mapped_device *md)
3461 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3464 int dm_test_deferred_remove_flag(struct mapped_device *md)
3466 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3469 int dm_suspended(struct dm_target *ti)
3471 return dm_suspended_md(dm_table_get_md(ti->table));
3473 EXPORT_SYMBOL_GPL(dm_suspended);
3475 int dm_noflush_suspending(struct dm_target *ti)
3477 return __noflush_suspending(dm_table_get_md(ti->table));
3479 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3481 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
3482 unsigned integrity, unsigned per_bio_data_size)
3484 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3485 struct kmem_cache *cachep = NULL;
3486 unsigned int pool_size = 0;
3487 unsigned int front_pad;
3489 if (!pools)
3490 return NULL;
3492 type = filter_md_type(type, md);
3495 case DM_TYPE_BIO_BASED:
3496 cachep = _io_cache;
3497 pool_size = dm_get_reserved_bio_based_ios();
3498 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3500 case DM_TYPE_REQUEST_BASED:
3501 cachep = _rq_tio_cache;
3502 pool_size = dm_get_reserved_rq_based_ios();
3503 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
3504 if (!pools->rq_pool)
3505 goto out;
3506 /* fall through to setup remaining rq-based pools */
3507 case DM_TYPE_MQ_REQUEST_BASED:
3508 if (!pool_size)
3509 pool_size = dm_get_reserved_rq_based_ios();
3510 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3511 /* per_bio_data_size is not used. See __bind_mempools(). */
3512 WARN_ON(per_bio_data_size != 0);
3513 break;
3514 default:
3515 BUG();
3516 }
3518 if (cachep) {
3519 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3520 if (!pools->io_pool)
3521 goto out;
3522 }
3524 pools->bs = bioset_create_nobvec(pool_size, front_pad);
3525 if (!pools->bs)
3526 goto out;
3528 if (integrity && bioset_integrity_create(pools->bs, pool_size))
3529 goto out;
3531 return pools;
3533 out:
3534 dm_free_md_mempools(pools);
3536 return NULL;
3539 void dm_free_md_mempools(struct dm_md_mempools *pools)
3541 if (!pools)
3542 return;
3544 if (pools->io_pool)
3545 mempool_destroy(pools->io_pool);
3547 if (pools->rq_pool)
3548 mempool_destroy(pools->rq_pool);
3550 if (pools->bs)
3551 bioset_free(pools->bs);
3553 kfree(pools);
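/*
 * Illustrative sketch (not dm core code): pairing dm_alloc_md_mempools()
 * with dm_free_md_mempools() for a bio-based device; 64 bytes of per-bio
 * data and no integrity support are assumed values.
 */
static struct dm_md_mempools *example_alloc_pools(struct mapped_device *md)
{
	struct dm_md_mempools *pools =
		dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED, 0, 64);

	if (!pools)
		return NULL;	/* slab pool or bioset creation failed */

	return pools;	/* release with dm_free_md_mempools(pools) */
}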
3556 static const struct block_device_operations dm_blk_dops = {
3557 .open = dm_blk_open,
3558 .release = dm_blk_close,
3559 .ioctl = dm_blk_ioctl,
3560 .getgeo = dm_blk_getgeo,
3561 .owner = THIS_MODULE
3567 module_init(dm_init);
3568 module_exit(dm_exit);
3570 module_param(major, uint, 0);
3571 MODULE_PARM_DESC(major, "The major number of the device mapper");
3573 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3574 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3576 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3577 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3579 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
3580 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
3582 MODULE_DESCRIPTION(DM_NAME " driver");
3583 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3584 MODULE_LICENSE("GPL");