/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
struct nbd_device {
	u32 flags;
	struct socket *sock;	/* If == NULL, device is not ready, yet	*/
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	spinlock_t tasks_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    one single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	if (!nbd->sock)
		return;

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	nbd->sock = NULL;
	del_timer_sync(&nbd->timeout_timer);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	nbd->disconnect = true;

	spin_lock_irqsave(&nbd->tasks_lock, flags);

	if (nbd->task_recv)
		force_sig(SIGKILL, nbd->task_recv);

	if (nbd->task_send)
		force_sig(SIGKILL, nbd->task_send);

	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}
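/*
 * How the timeout fires: the timer cannot abort in-flight socket I/O
 * directly, so it SIGKILLs the receiver and sender threads instead.
 * sock_xmit() below unblocks only SIGKILL for the duration of a
 * transfer, so this is the one signal guaranteed to interrupt a stuck
 * send/receive; both threads then shut the socket down from their
 * signal-handling paths.
 */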
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}
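/*
 * Memory-reserve note: sock_xmit() runs with PF_MEMALLOC set and marks
 * the socket's allocations GFP_NOIO | __GFP_MEMALLOC, and the receiver
 * thread calls sk_set_memalloc() on the socket.  Together these let the
 * network path dip into the emergency reserves while the block device
 * is under writeback, which is what makes swapping over NBD plausible
 * at all (see the caveat in the header comment).
 */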
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));
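	/*
	 * On-the-wire layout of struct nbd_request (28 bytes, enforced by
	 * the BUILD_BUG_ON in nbd_init below), all multi-byte fields
	 * big-endian:
	 *
	 *	__be32 magic;	  NBD_REQUEST_MAGIC
	 *	__be32 type;	  NBD_CMD_*
	 *	char   handle[8]; opaque cookie, echoed back in the reply;
	 *			  here it carries the struct request pointer
	 *	__be64 from;	  byte offset into the export
	 *	__be32 len;	  payload length in bytes
	 */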
	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}
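/*
 * Pairing replies to requests: the sender stashed the struct request
 * pointer in the 8-byte handle, so the receiver can look the request up
 * again by pointer equality.  The wait on active_wq keeps us from
 * matching a request while nbd_handle_req() is still transmitting it
 * (active_req) and has not yet added it to queue_head.
 */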
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* An ERR_PTR return means something went wrong; inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
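/*
 * The attribute is created on the disk's device below, so while a client
 * is attached the receiver's pid is readable from sysfs, e.g.:
 *
 *	$ cat /sys/block/nbd0/pid
 *	1234
 *
 * (pid value illustrative only)
 */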
static int nbd_thread_recv(struct nbd_device *nbd)
{
	struct request *req;
	int ret;
	unsigned long flags;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_recv = current;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");

		spin_lock_irqsave(&nbd->tasks_lock, flags);
		nbd->task_recv = NULL;
		spin_unlock_irqrestore(&nbd->tasks_lock, flags);

		return ret;
	}

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_recv = NULL;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	if (signal_pending(current)) {
		siginfo_t info;

		ret = dequeue_signal_lock(current, &current->blocked, &info);
		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
			 task_pid_nr(current), current->comm, ret);
		mutex_lock(&nbd->tx_lock);
		sock_shutdown(nbd);
		mutex_unlock(&nbd->tx_lock);
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}
static int nbd_thread_send(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;
	unsigned long flags;

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_send = current;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		if (signal_pending(current)) {
			siginfo_t info;
			int ret;

			ret = dequeue_signal_lock(current, &current->blocked,
						  &info);
			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
				 task_pid_nr(current), current->comm, ret);
			mutex_lock(&nbd->tx_lock);
			sock_shutdown(nbd);
			mutex_unlock(&nbd->tx_lock);
			break;
		}

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_send = NULL;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	/* Clear maybe pending signals */
	if (signal_pending(current)) {
		siginfo_t info;

		dequeue_signal_lock(current, &current->blocked, &info);
	}

	return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void nbd_request_handler(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
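/*
 * Request flow in this driver: nbd_request_handler() runs in the block
 * layer's context with q->queue_lock held and interrupts off, so it must
 * not block on the socket.  It only moves each request onto
 * waiting_queue and wakes the sender kthread; nbd_thread_send() then
 * does the blocking network I/O via nbd_handle_req(), and
 * nbd_thread_recv() completes requests as replies arrive.
 */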
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = false; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread_send, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);

		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			blkdev_reread_part(bdev);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
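/*
 * Typical userspace setup sequence (a sketch of what nbd-client does;
 * exact calls and values vary):
 *
 *	ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd_fd, NBD_DO_IT);	-- blocks until disconnect
 *
 * NBD_DO_IT turns the caller into the receiver thread and returns only
 * when the connection is torn down, after which userspace typically
 * issues NBD_CLEAR_QUE and NBD_CLEAR_SOCK.
 */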
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct dentry *f;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR_OR_NULL(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
			nbd_name(nbd), PTR_ERR(dir));
		return PTR_ERR(dir);
	}
	nbd->dbg_dir = dir;

	f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	/* note: pass nbd itself, not &nbd; the show callback reads s->private */
	f = debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return PTR_ERR(dbg_dir);

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
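/*
 * Resulting debugfs layout (assuming debugfs is mounted in the usual
 * place), one directory per attached device:
 *
 *	/sys/kernel/debug/nbd/nbd0/tasks
 *	/sys/kernel/debug/nbd/nbd0/size_bytes
 *	/sys/kernel/debug/nbd/nbd0/timeout
 *	/sys/kernel/debug/nbd/nbd0/blocksize
 *	/sys/kernel/debug/nbd/nbd0/flags
 */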
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
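	/*
	 * Example: loading with max_part=15 gives part_shift = fls(15) = 4,
	 * so max_part stays (1 << 4) - 1 = 15 and each device gets 16 minor
	 * numbers (one whole-disk node plus 15 partitions).  max_part=16
	 * would round up: fls(16) = 5, max_part = 31, 32 minors per device.
	 */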
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		spin_lock_init(&nbd_dev[i].tasks_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
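/*
 * Example module load (parameter values illustrative):
 *
 *	# modprobe nbd nbds_max=4 max_part=8
 *
 * creates /dev/nbd0../dev/nbd3, each supporting up to 15 partitions
 * after the fls() rounding in nbd_init() (max_part=8 -> part_shift=4).
 */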