/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#define NBD_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_SET_TIMEOUT: return "set-timeout";
	case NBD_SET_FLAGS: return "set-flags";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
	}
	return "unknown";
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	}
	return "invalid";
}
#endif /* NDEBUG */
static void nbd_end_request(struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (nbd->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + nbd->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (nbd->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(nbd, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
			nbd->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec *bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(req, iter))
				flags = MSG_MORE;
			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
					nbd->disk->disk_name, req, bvec->bv_len);
			result = sock_send_bvec(nbd, bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				goto error_out;
			}
		}
	}
	return 0;

error_out:
	return -EIO;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			nbd->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec *bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
				nbd->disk->disk_name, req, bvec->bv_len);
		}
	}
	return req;
harderror:
	nbd->harderror = result;
	return NULL;
}
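
/*
 * For reference, a sketch of the 16-byte reply header consumed above;
 * this is struct nbd_reply from <linux/nbd.h>, all fields big-endian:
 *
 *	__be32 magic;       NBD_REPLY_MAGIC
 *	__be32 error;       0 on success, non-zero errno otherwise
 *	char   handle[8];   cookie copied from the matching request
 *
 * For a successful NBD_CMD_READ the payload bytes follow immediately
 * after this header on the socket.
 */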
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);
	nbd->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->pid = 0;
		return ret;
	}

	while ((req = nbd_read_stat(nbd)) != NULL)
		nbd_end_request(req);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	nbd->pid = 0;
	return 0;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	nbd_cmd(req) = NBD_CMD_READ;
	if (rq_data_dir(req) == WRITE) {
		nbd_cmd(req) = NBD_CMD_WRITE;
		if (nbd->flags & NBD_FLAG_READ_ONLY) {
			dev_err(disk_to_dev(nbd->disk),
				"Write on read-only\n");
			goto error_out;
		}
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(req);
}
static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}
	return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
static void do_nbd_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
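
/*
 * Request flow, for orientation: the block layer calls do_nbd_request()
 * with queue_lock held.  Requests are parked on nbd->waiting_queue and
 * handed to the per-device nbd_thread(), which transmits them via
 * nbd_handle_req()/nbd_send_req() and moves them to nbd->queue_head.
 * The NBD_DO_IT process then completes them from nbd_do_it()/
 * nbd_read_stat() as server replies arrive.
 */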
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");

		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		if (!nbd->sock)
			return -EINVAL;
		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct file *file;

		nbd->sock = NULL;
		file = nbd->file;
		nbd->file = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		if (file)
			fput(file);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct file *file;
		if (nbd->file)
			return -EBUSY;
		file = fget(arg);
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				nbd->file = file;
				nbd->sock = SOCKET_I(inode);
				if (max_part > 0)
					bdev->bd_invalidated = 1;
				return 0;
			} else {
				fput(file);
			}
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		struct file *file;
		int error;

		if (nbd->pid)
			return -EBUSY;
		if (!nbd->file)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}
		wake_up_process(thread);
		error = nbd_do_it(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		if (error)
			return error;
		sock_shutdown(nbd, 0);
		file = nbd->file;
		nbd->file = NULL;
		nbd_clear_que(nbd);
		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
		if (file)
			fput(file);
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			ioctl_by_bdev(bdev, BLKRRPART, 0);
		return nbd->harderror;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}
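
/*
 * A minimal sketch of the ioctl sequence a userspace client is expected
 * to drive (error handling omitted; "sock" is the fd of a connected TCP
 * socket to the server, and the names here are illustrative only):
 *
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd, NBD_SET_SOCK, sock);
 *	ioctl(nbd, NBD_DO_IT);		(blocks until disconnect)
 *	ioctl(nbd, NBD_CLEAR_QUE);
 *	ioctl(nbd, NBD_CLEAR_SOCK);
 *
 * Because NBD_DO_IT does not return until the session ends, real clients
 * issue it from a dedicated process or thread.
 */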
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
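
	/*
	 * Worked example: max_part=10 gives fls(10) = 4, so part_shift = 4
	 * and max_part is rounded up to (1 << 4) - 1 = 15; each disk then
	 * spans 16 minors - one for the whole disk plus 15 partitions.
	 */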
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = NBD_MAGIC;
		nbd_dev[i].flags = 0;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif
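
/*
 * Usage sketch (hypothetical values; nbd-client is the usual userspace
 * counterpart that connects the socket and drives the ioctls above):
 *
 *	modprobe nbd nbds_max=4 max_part=15
 *	nbd-client server.example.com 10809 /dev/nbd0
 *	mount /dev/nbd0p1 /mnt
 */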