/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	if (bdev->bd_openers <= 1)
		bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}
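
/*
 * Worked example (illustrative only, not called anywhere): with
 * blocksize = 4096 and nr_blocks = 262144, nbd_size_set() stores
 * bytesize = 4096 * 262144 = 1 GiB, and nbd_size_update() then sets
 * the disk capacity to bytesize >> 9 = 2097152 512-byte sectors.
 */
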
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
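
/*
 * A note on the PF_MEMALLOC dance in sock_xmit() above: the transmit
 * path can be entered from memory reclaim (dirty pages being written
 * back to an NBD device), so the task is temporarily flagged
 * PF_MEMALLOC and the socket allocates with GFP_NOIO | __GFP_MEMALLOC,
 * letting it dip into the memory reserves instead of recursing into
 * reclaim; tsk_restore_flags() undoes this on the way out.
 */
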
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_sock *nsock = nbd->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);

			if (skip >= iov_iter_count(&from)) {
				skip -= iov_iter_count(&from);
				continue;
			}
			iov_iter_advance(&from, skip);
			skip = 0;

			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
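
/*
 * On-the-wire layout of the header sent above (struct nbd_request in
 * <linux/nbd.h>, all multi-byte fields big-endian; nbd_init() below
 * asserts the 28-byte size):
 *
 *	__be32 magic;		NBD_REQUEST_MAGIC
 *	__be32 type;		NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];	echoed back verbatim in the reply
 *	__be64 from;		byte offset, i.e. blk_rq_pos(req) << 9
 *	__be32 len;		payload length in bytes
 */
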
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}
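
/*
 * How replies find their request: nbd_send_cmd() stashes the value of
 * blk_mq_unique_tag() in the handle, which packs the hardware queue
 * index into the upper 16 bits and the per-queue tag into the lower
 * 16. nbd_read_stat() unpacks it with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() to look the request back up.
 */
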
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;
	int ret;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return -EINVAL;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		return -EINVAL;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		return -EINVAL;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * queue.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	ret = nbd_send_cmd(nbd, cmd, index);
out:
	mutex_unlock(&nsock->tx_lock);
	return ret;
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);

	return ret;
}
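
/*
 * Return convention for the blk-mq API of this era: BLK_MQ_RQ_QUEUE_OK
 * means the request was sent and will be completed by recv_work(),
 * BLK_MQ_RQ_QUEUE_BUSY (a partial send in nbd_send_cmd()) asks the
 * block layer to requeue and retry later, and BLK_MQ_RQ_QUEUE_ERROR
 * fails the request outright.
 */
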
static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
			  unsigned long arg)
{
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	socks[nbd->num_connections++] = nsock;

	if (max_part)
		bdev->bd_invalidated = 1;
	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < nbd->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!nbd->socks)
		return -EINVAL;

	mutex_unlock(&nbd->config_lock);
	fsync_bdev(bdev);
	mutex_lock(&nbd->config_lock);

	/* Check again after getting mutex back. */
	if (!nbd->socks)
		return -EINVAL;

	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &nbd->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);

	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	/*
	 * We want to give the run thread a chance to wait for everybody
	 * to clean up and then do its own cleanup.
	 */
	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
	    nbd->num_connections) {
		int i;

		for (i = 0; i < nbd->num_connections; i++) {
			sockfd_put(nbd->socks[i]->sock);
			kfree(nbd->socks[i]);
		}
		kfree(nbd->socks);
		nbd->socks = NULL;
		nbd->num_connections = 0;
	}
	nbd->task_setup = NULL;

	return 0;
}

static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
	struct recv_thread_args *args;
	int num_connections = nbd->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!nbd->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		error = -EINVAL;
		goto out_err;
	}

	set_bit(NBD_RUNNING, &nbd->runtime_flags);
	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
	if (!args) {
		error = -ENOMEM;
		goto out_err;
	}
	nbd->task_recv = current;
	mutex_unlock(&nbd->config_lock);

	nbd_parse_flags(nbd, bdev);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		goto out_recv;
	}

	nbd_size_update(nbd, bdev);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		sk_set_memalloc(nbd->socks[i]->sock->sk);
		atomic_inc(&nbd->recv_threads);
		INIT_WORK(&args[i].work, recv_work);
		args[i].nbd = nbd;
		args[i].index = i;
		queue_work(recv_workqueue, &args[i].work);
	}
	wait_event_interruptible(nbd->recv_wq,
				 atomic_read(&nbd->recv_threads) == 0);
	for (i = 0; i < num_connections; i++)
		flush_work(&args[i].work);
	nbd_dev_dbg_close(nbd);
	nbd_size_clear(nbd, bdev);
	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
	mutex_lock(&nbd->config_lock);
	nbd->task_recv = NULL;
out_err:
	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
	nbd_clear_sock(nbd, bdev);

	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		error = 0;
	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
		error = -ETIMEDOUT;

	nbd_reset(nbd);
	return error;
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd, bdev);
	case NBD_CLEAR_SOCK:
		return nbd_clear_sock(nbd, bdev);
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, bdev, arg);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, bdev, arg,
			     div_s64(nbd->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
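
/*
 * Typical userspace setup sequence, as a rough sketch of what a helper
 * such as nbd-client does (error handling omitted; 'sock_fd' is an
 * already-connected TCP socket to the server, and the size/flag values
 * come from the NBD negotiation):
 *
 *	int dev_fd = open("/dev/nbd0", O_RDWR);
 *
 *	ioctl(dev_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev_fd, NBD_SET_FLAGS, negotiated_flags);
 *	ioctl(dev_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(dev_fd, NBD_DO_IT);	// blocks until disconnect
 *
 *	ioctl(dev_fd, NBD_CLEAR_QUE);
 *	ioctl(dev_fd, NBD_CLEAR_SOCK);
 */
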
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}
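
/*
 * With debugfs mounted at the usual location, the files created above
 * appear per device as:
 *
 *	/sys/kernel/debug/nbd/nbd0/tasks
 *	/sys/kernel/debug/nbd/nbd0/size_bytes
 *	/sys/kernel/debug/nbd/nbd0/timeout
 *	/sys/kernel/debug/nbd/nbd0/blocksize
 *	/sys/kernel/debug/nbd/nbd0/flags
 */
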
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	nbd->magic = 0;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	disk->queue->limits.discard_zeroes_data = 0;
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	nbd->magic = NBD_MAGIC;
	mutex_init(&nbd->config_lock);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	init_waitqueue_head(&nbd->recv_wq);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
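
		/*
		 * Example: loading with max_part=15 gives part_shift =
		 * fls(15) = 4, so each device spans 1 << 4 = 16 minors:
		 * the whole disk plus max_part = 15 partitions.
		 */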
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
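
/*
 * Example usage (a sketch; nbd-client is the usual userspace helper,
 * its exact invocation varies by version, and 10809 is the standard
 * NBD port):
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *	nbd-client some.server 10809 /dev/nbd0
 *	mount /dev/nbd0 /mnt
 */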