/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non-blocking, pushing work to a helper thread. Lots of fixes from
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operation write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};
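/*
 * Example (user space, not part of the driver): the XOR transfer above is
 * selected through the LOOP_SET_STATUS64 ioctl on an already-bound device.
 * A minimal, hypothetical sketch; the device path and the toy key are
 * assumptions and error handling is trimmed:
 *
 *	#include <fcntl.h>
 *	#include <linux/loop.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct loop_info64 info;
 *		int fd = open("/dev/loop0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&info, 0, sizeof(info));
 *		info.lo_encrypt_type = LO_CRYPT_XOR;
 *		info.lo_encrypt_key_size = 5;
 *		memcpy(info.lo_encrypt_key, "kanga", 5);
 *		if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 *
 * After this, transfer_xor() XORs every block with the key, cycling through
 * the key bytes.
 */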
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of the backing device, the logical block size
	 * of the loop device is not smaller than that of the backing
	 * device, and the loop device does not apply a transfer
	 * transformation.
	 *
	 * TODO: the above condition may be relaxed in the future, and
	 * direct I/O could then be switched at runtime, since most
	 * requests from sane applications are PAGE_SIZE aligned anyway.
	 */
	if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
	    !(lo->lo_offset & dio_align) &&
	    mapping->a_ops->direct_IO &&
	    !lo->transfer)
		use_dio = true;
	else
		use_dio = false;

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The LO_FLAGS_DIRECT_IO flag is handled like LO_FLAGS_READ_ONLY:
	 * both are set by the kernel, and losetup picks up the change via
	 * ioctl(LOOP_GET_STATUS).
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
}
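/*
 * Example (user space, not part of the driver): the same toggle is reachable
 * through the LOOP_SET_DIRECT_IO ioctl on a bound device. A minimal sketch,
 * assuming /dev/loop0 is bound and the backing file meets the alignment
 * rules above:
 *
 *	#include <fcntl.h>
 *	#include <linux/loop.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/loop0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
 *			perror("LOOP_SET_DIRECT_IO");
 *		close(fd);
 *		return 0;
 *	}
 *
 * Passing 1 requests direct I/O against the backing file, 0 reverts to
 * buffered I/O; the ioctl fails if the conditions checked here do not hold.
 */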
static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
		 loff_t logical_blocksize)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	if (unlikely((loff_t)x != size))
		return -EFBIG;
	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
		lo->lo_logical_blocksize = logical_blocksize;
		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
		blk_queue_logical_block_size(lo->lo_queue,
					     lo->lo_logical_blocksize);
	}

	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	return 0;
}

static int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return -EIO;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
			   loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}
/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
			     loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
				     bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
			  loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
			    loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
				     bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}

static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	int ret;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
out:
	return ret;
}
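/*
 * Example (user space, not part of the driver): the fallocate call above is
 * the same punch-hole operation that can be applied directly to an image
 * file. A minimal sketch; the file name and offsets are assumptions:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("disk.img", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			      4096, 1 << 20) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 *
 * This deallocates 1 MiB at offset 4096 without changing the file size,
 * which is exactly how a discard request against the loop device is
 * translated for the backing file.
 */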
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
		struct bio *bio = cmd->rq->bio;

		bio_advance(bio, cmd->ret);
		zero_fill_bio(bio);
	}

	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	cmd->ret = ret;
	blk_mq_complete_request(cmd->rq);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct bio_vec *bvec;
	struct bio *bio = cmd->rq->bio;
	struct file *file = lo->lo_backing_file;
	int ret;

	/* nomerge for loop request queue */
	WARN_ON(cmd->rq->bio != cmd->rq->biotail);

	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
		      bio_segments(bio), blk_rq_bytes(cmd->rq));
	/*
	 * This bio may start in the middle of the 'bvec' because of bio
	 * splitting, so the offset from the bvec must be passed on to the
	 * iov iterator.
	 */
	iter.iov_offset = bio->bi_iter.bi_bvec_done;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;

	if (rw == WRITE)
		ret = call_write_iter(file, &cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &cmd->iocb, &iter);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
	return 0;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should ideally be replaced by
	 * an io-submit style function like lo_rw_aio(). One blocker is that
	 * lo_read_simple() must call flush_dcache_page() after each page has
	 * been written from the kernel, which is hard to do in a function
	 * that submits all segments of the request at once. Direct-I/O reads
	 * do not need flush_dcache_page().
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		return lo_discard(lo, rq, pos);
	case REQ_OP_WRITE:
		if (lo->transfer)
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (lo->transfer)
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
			lo->use_dio);
}

/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		goto out;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
out:
	complete(&p->wait);
}

/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO; it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;

	w.file = file;

	/* freeze queue and wait for completion of scheduled requests */
	blk_mq_freeze_queue(lo->lo_queue);

	/* do the switch action */
	do_loop_switch(lo, &w);

	/* unfreeze */
	blk_mq_unfreeze_queue(lo->lo_queue);

	return 0;
}

/*
 * Helper to flush the IOs in loop, but keeping the loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (lo->lo_state != Lo_bound)
		return 0;
	return loop_switch(lo, NULL);
}
static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)
{
	int rc;

	/*
	 * bd_mutex is already held in the release path, so do not
	 * acquire it again when this function is called from there.
	 *
	 * If the partition reread is not from the release path, lo_refcnt
	 * must be at least one and can only drop to zero when the current
	 * holder is released.
	 */
	if (!atomic_read(&lo->lo_refcnt))
		rc = __blkdev_reread_part(bdev);
	else
		rc = blkdev_reread_part(bdev);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);
	return 0;

out_putf:
	fput(file);
out:
	return error;
}
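/*
 * Example (user space, not part of the driver): the switch is driven by the
 * LOOP_CHANGE_FD ioctl. A minimal, hypothetical sketch; file names are
 * assumptions, and the replacement must be the same size as the old
 * backing store:
 *
 *	#include <fcntl.h>
 *	#include <linux/loop.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int lofd = open("/dev/loop0", O_RDONLY);
 *		int nfd = open("copy.img", O_RDONLY);
 *
 *		if (lofd < 0 || nfd < 0)
 *			return 1;
 *		if (ioctl(lofd, LOOP_CHANGE_FD, nfd) < 0)
 *			return 1;
 *		close(nfd);
 *		close(lofd);
 *		return 0;
 *	}
 *
 * The call fails unless the device is bound read-only and the sizes match,
 * exactly as checked above.
 */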
static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sprintf(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sprintf(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs= loop_attrs,
};

static int loop_sysfs_init(struct loop_device *lo)
{
	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
				  &loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
			   &loop_attribute_group);
}
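/*
 * These attributes appear under sysfs; with the attribute group named
 * "loop" they show up as /sys/block/loopN/loop/<attribute>. A minimal,
 * hypothetical user-space sketch that prints the backing file:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[4096];
 *		FILE *f = fopen("/sys/block/loop0/loop/backing_file", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */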
static void loop_config_discard(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;
	int lo_bits = 9;

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	if ((!file->f_op->fallocate) ||
	    lo->lo_encrypt_key_size) {
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_max_write_zeroes_sectors(q, 0);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		return;
	}

	q->limits.discard_granularity = inode->i_sb->s_blocksize;
	q->limits.discard_alignment = 0;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
		lo_bits = blksize_bits(lo->lo_logical_blocksize);

	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

static void loop_unprepare_queue(struct loop_device *lo)
{
	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);
}

static int loop_prepare_queue(struct loop_device *lo)
{
	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
		return -ENOMEM;
	set_user_nice(lo->worker_task, MIN_NICE);
	return 0;
}
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file	*file, *f;
	struct inode	*inode;
	struct address_space *mapping;
	unsigned	lo_blocksize;
	int		lo_flags = 0;
	int		error;
	loff_t		size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo_flags |= LO_FLAGS_READ_ONLY;

	lo_blocksize = S_ISBLK(inode->i_mode) ?
		inode->i_bdev->bd_block_size : PAGE_SIZE;

	error = -EFBIG;
	size = get_loop_size(lo, file);
	if ((loff_t)(sector_t)size != size)
		goto out_putf;
	error = loop_prepare_queue(lo);
	if (error)
		goto out_putf;

	error = 0;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = false;
	lo->lo_blocksize = lo_blocksize;
	lo->lo_logical_blocksize = 512;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	loop_update_dio(lo);
	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);
	loop_sysfs_init(lo);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	return 0;

out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
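/*
 * Example (user space, not part of the driver): binding is what losetup
 * does with the LOOP_SET_FD ioctl. A minimal, hypothetical sketch; the
 * image name is an assumption:
 *
 *	#include <fcntl.h>
 *	#include <linux/loop.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int lofd = open("/dev/loop0", O_RDWR);
 *		int bfd = open("disk.img", O_RDWR);
 *
 *		if (lofd < 0 || bfd < 0)
 *			return 1;
 *		if (ioctl(lofd, LOOP_SET_FD, bfd) < 0)
 *			return 1;
 *		close(bfd);
 *		close(lofd);
 *		return 0;
 *	}
 *
 * The backing fd can be closed right after the ioctl: loop_set_fd() above
 * takes its own reference to the file.
 */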
static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown
	 * when the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid
	 * on the loopback device whenever it appears. xfstests is notorious
	 * for failing tests because blkid via udev races with a sequence
	 * like "losetup <dev>; mkfs; losetup -d <dev>", causing the
	 * losetup -d command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_ctl_mutex);
		return 0;
	}

	if (filp == NULL)
		return -EINVAL;

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_size(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
		loop_reread_partitions(lo, bdev);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	loop_unprepare_queue(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();
	int lo_flags = lo->lo_flags;

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	/* I/O needs to be drained during the transfer transition */
	blk_mq_freeze_queue(lo->lo_queue);

	err = loop_release_xfer(lo);
	if (err)
		goto exit;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		goto exit;

	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
			lo->lo_logical_blocksize = 512;
		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
		if (LO_INFO_BLOCKSIZE(info) != 512 &&
		    LO_INFO_BLOCKSIZE(info) != 1024 &&
		    LO_INFO_BLOCKSIZE(info) != 2048 &&
		    LO_INFO_BLOCKSIZE(info) != 4096)
			return -EINVAL;
		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
			return -EINVAL;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit ||
	    lo->lo_flags != lo_flags ||
	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
				     LO_INFO_BLOCKSIZE(info))) {
			err = -EFBIG;
			goto exit;
		}
	}

	loop_config_discard(lo);

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	    (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);

exit:
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
	    !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		loop_reread_partitions(lo, lo->lo_device);
	}

	return err;
}
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(&file->f_path, &stat,
			    STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo)
{
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
				lo->lo_logical_blocksize);
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;

	error = -EINVAL;
out:
	return error;
}

static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo);
		break;
	case LOOP_SET_DIRECT_IO:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_dio(lo, arg);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}
#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}

/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	atomic_inc(&lo->lo_refcnt);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	if (atomic_dec_return(&lo->lo_refcnt))
		return;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo);
		if (!err)
			return;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

	mutex_unlock(&lo->lo_ctl_mutex);
}

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct loop_device *lo = cmd->rq->q->queuedata;

	blk_mq_start_request(bd->rq);

	if (lo->lo_state != Lo_bound)
		return BLK_STS_IOERR;

	switch (req_op(cmd->rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		cmd->use_aio = false;
		break;
	default:
		cmd->use_aio = lo->use_dio;
		break;
	}

	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_STS_OK;
}

static void loop_handle_cmd(struct loop_cmd *cmd)
{
	const bool write = op_is_write(req_op(cmd->rq));
	struct loop_device *lo = cmd->rq->q->queuedata;
	int ret = 0;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
		ret = -EIO;
		goto failed;
	}

	ret = do_req_filebacked(lo, cmd->rq);
failed:
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		cmd->ret = ret ? -EIO : 0;
		blk_mq_complete_request(cmd->rq);
	}
}

static void loop_queue_work(struct kthread_work *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);
}

static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	kthread_init_work(&cmd->work, loop_queue_work);

	return 0;
}

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq       = loop_queue_rq,
	.init_request	= loop_init_request,
	.complete	= lo_complete_rq,
};
static int loop_add(struct loop_device **l, int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_state = Lo_unbound;

	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	if (err < 0)
		goto out_free_dev;
	i = err;

	err = -ENOMEM;
	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);
	if (err)
		goto out_free_idr;

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR_OR_NULL(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;
	}
	lo->lo_queue->queuedata = lo;

	/*
	 * It doesn't make sense to enable merge because the I/O
	 * submitted to backing file is handled page by page.
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);

	err = -ENOMEM;
	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minor space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static and inflexible, and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
	if (!part_shift)
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	mutex_init(&lo->lo_ctl_mutex);
	atomic_set(&lo->lo_refcnt, 0);
	lo->lo_number		= i;
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	*l = lo;
	return lo->lo_number;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
out:
	return err;
}

static void loop_remove(struct loop_device *lo)
{
	blk_cleanup_queue(lo->lo_queue);
	del_gendisk(lo->lo_disk);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);
	kfree(lo);
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {
		*l = lo;
		return 1;
	}
	return 0;
}

static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;
	int ret = -ENODEV;

	if (i < 0) {
		int err;

		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
		if (err == 1) {
			*l = lo;
			ret = lo->lo_number;
		}
		goto out;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (lo) {
		*l = lo;
		ret = lo->lo_number;
	}
out:
	return ret;
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = NULL;
	else
		kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);

	*part = 0;
	return kobj;
}

static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret = -ENOSYS;

	mutex_lock(&loop_index_mutex);
	switch (cmd) {
	case LOOP_CTL_ADD:
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		if (atomic_read(&lo->lo_refcnt) > 0) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_index_mutex);

	return ret;
}

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");
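/*
 * Example (user space, not part of the driver): /dev/loop-control is how
 * modern losetup finds a free device. A minimal sketch:
 *
 *	#include <fcntl.h>
 *	#include <linux/loop.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int ctl = open("/dev/loop-control", O_RDWR);
 *		int nr;
 *
 *		if (ctl < 0)
 *			return 1;
 *		nr = ioctl(ctl, LOOP_CTL_GET_FREE, 0);
 *		if (nr < 0)
 *			return 1;
 *		printf("/dev/loop%d\n", nr);
 *		close(ctl);
 *		return 0;
 *	}
 *
 * LOOP_CTL_GET_FREE returns the number of an unbound device, creating one
 * on demand; LOOP_CTL_ADD and LOOP_CTL_REMOVE manage specific indices, as
 * loop_control_ioctl() above shows.
 */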
static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	err = misc_register(&loop_misc);
	if (err < 0)
		return err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct minor number
		 * if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto misc_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto misc_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
	return err;
}

static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif