/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->bg_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
/*
 * Reset request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
}
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_request_init(req);
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);
		fuse_request_free(req);
	}
}
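/*
 * Illustrative sketch (not part of this file, never compiled): the
 * typical request life cycle as seen by a caller elsewhere in fs/fuse.
 * The GETATTR opcode and argument layout below are only an example of
 * the pattern: get a request, describe the 'in' and 'out' arguments,
 * send it, read the error from the reply header, drop the reference.
 */
#if 0
static int example_getattr(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_attr_out *outarg)
{
	int err;
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_GETATTR;	/* which operation */
	req->in.h.nodeid = nodeid;		/* which inode it refers to */
	req->out.numargs = 1;			/* expect one reply argument */
	req->out.args[0].size = sizeof(*outarg);
	req->out.args[0].value = outarg;
	request_send(fc, req);			/* queue and wait for the reply */
	err = req->out.h.error;			/* error returned by the daemon */
	fuse_put_request(fc, req);		/* drop the caller's reference */
	return err;
}
#endif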
void fuse_remove_background(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del_init(&req->bg_entry);
	if (fc->num_background == FUSE_MAX_BACKGROUND) {
		fc->blocked = 0;
		wake_up_all(&fc->blocked_waitq);
	}
	fc->num_background--;
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.  This
 * is because fuse_reset_request() may be called after a request is
 * finished and it must be the sole possessor.  If a request is
 * interrupted and put in the background, it will return with an error
 * and hence never be reset and reused.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (!req->background) {
		spin_unlock(&fc->lock);
		wake_up(&req->waitq);
		fuse_put_request(fc, req);
	} else {
		struct inode *inode = req->inode;
		struct inode *inode2 = req->inode2;
		struct file *file = req->file;
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
		req->end = NULL;
		req->inode = NULL;
		req->inode2 = NULL;
		req->file = NULL;
		if (!list_empty(&req->bg_entry))
			fuse_remove_background(fc, req);
		spin_unlock(&fc->lock);

		if (end)
			end(fc, req);
		else
			fuse_put_request(fc, req);

		if (file)
			fput(file);
		iput(inode);
		iput(inode2);
	}
}
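/*
 * Illustrative sketch (never compiled): how an asynchronous caller can
 * combine the 'end' callback with request_send_background().  The
 * callback runs from request_end() once the reply (or an abort) has
 * arrived and is responsible for putting the request; example_end and
 * example_submit_async are made-up names.
 */
#if 0
static void example_end(struct fuse_conn *fc, struct fuse_req *req)
{
	/* req->out.h.error holds the result; clean up private state here */
	fuse_put_request(fc, req);
}

static void example_submit_async(struct fuse_conn *fc, struct fuse_req *req)
{
	req->end = example_end;			/* called from request_end() */
	request_send_background(fc, req);	/* don't wait for the reply */
}
#endif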
/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem; it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introducing additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) and file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request: the RELEASE message
 * is always sent as background, since it doesn't return an error or
 * inode.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}
/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fc->lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fc->lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	background_request(fc, req);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}
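/*
 * Illustrative sketch (never compiled), modelled on how FORGET is sent
 * from fs/fuse/inode.c: FORGET expects no reply, so the request is
 * queued with request_send_noreply() and freed by request_end() as soon
 * as the daemon has read it.  The argument must live in the request
 * itself, since the caller does not wait.
 */
#if 0
static void example_forget(struct fuse_conn *fc, struct fuse_req *req,
			   u64 nodeid, u64 nlookup)
{
	struct fuse_forget_in *inarg = &req->misc.forget_in;
	inarg->nlookup = nlookup;
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_forget_in);
	req->in.args[0].value = inarg;
	request_send_noreply(fc, req);
}
#endif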
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}
/*
 * Unlock request.  If it was interrupted while being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace buffer.
 * If no reply is needed (FORGET), or the request has been interrupted,
 * or there was an error during the copying, then it's finished by
 * calling request_end().  Otherwise add it to the processing list, and
 * set the 'sent' state.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    list_empty(&fc->pending))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
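/*
 * Illustrative sketch (never compiled): the userspace side of this
 * read.  A FUSE daemon reads whole requests from /dev/fuse; each read()
 * returns one message consisting of a struct fuse_in_header followed by
 * the opcode-specific arguments.  The buffer size and handle_request()
 * are placeholders.
 */
#if 0
static void example_daemon_read_loop(int fuse_fd)
{
	char buf[65536];	/* must be large enough for a whole request */

	for (;;) {
		struct fuse_in_header *in = (struct fuse_in_header *) buf;
		ssize_t len = read(fuse_fd, buf, sizeof(buf));
		if (len < (ssize_t) sizeof(*in))
			break;	/* error, e.g. ENODEV after unmount/abort */
		/* in->opcode selects the operation, in->unique identifies
		   the eventual reply, in->len equals the length computed
		   in queue_request() above */
		handle_request(in, buf + sizeof(*in), len - sizeof(*in));
	}
}
#endif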
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
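/*
 * Illustrative sketch (never compiled): the matching userspace reply.
 * The daemon writes a struct fuse_out_header, with 'unique' copied from
 * the request, followed by the reply payload, all in a single writev();
 * an error reply must consist of the header alone, as checked by
 * copy_out_args() above.
 */
#if 0
static int example_daemon_reply(int fuse_fd, __u64 unique, int error,
				const void *arg, size_t argsize)
{
	struct fuse_out_header out;
	struct iovec iov[2];

	out.unique = unique;		/* must match the request header */
	out.error = error;		/* 0 or a negative errno */
	out.len = sizeof(out) + (error ? 0 : argsize);
	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(out);
	iov[1].iov_base = (void *) arg;
	iov[1].iov_len = argsize;
	return writev(fuse_fd, iov, error ? 1 : 2);
}
#endif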
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
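/*
 * Illustrative sketch (never compiled): a daemon that multiplexes other
 * work can wait for requests with poll() on /dev/fuse and only call
 * read() once POLLIN is reported.
 */
#if 0
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* a request is pending; read() will not block */
	}
#endif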
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * still locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list, is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		kobject_put(&fc->kobj);
	}

	return 0;
}
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}