diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f556a0d5c0d31010b86552ff958dcb55b20947f5..104a62dadb94c9b8fa489963b51a97bc69b3dba2 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -23,13 +23,11 @@ static kmem_cache_t *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
-       struct fuse_conn *fc;
-       spin_lock(&fuse_lock);
-       fc = file->private_data;
-       if (fc && !fc->connected)
-               fc = NULL;
-       spin_unlock(&fuse_lock);
-       return fc;
+       /*
+        * Lockless access is OK, because file->private data is set
+        * once during mount and is valid until the file is released.
+        */
+       return file->private_data;
 }
 
 static void fuse_request_init(struct fuse_req *req)
@@ -66,12 +64,16 @@ static void restore_sigs(sigset_t *oldset)
        sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
+/*
+ * Reset request, so that it can be reused
+ *
+ * The caller must be _very_ careful to make sure, that it is holding
+ * the only reference to req
+ */
 void fuse_reset_request(struct fuse_req *req)
 {
-       int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
-       req->preallocated = preallocated;
 }
 
 static void __fuse_get_request(struct fuse_req *req)
@@ -86,80 +88,64 @@ static void __fuse_put_request(struct fuse_req *req)
        atomic_dec(&req->count);
 }
 
-static struct fuse_req *do_get_request(struct fuse_conn *fc)
+struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 {
        struct fuse_req *req;
-
-       spin_lock(&fuse_lock);
-       BUG_ON(list_empty(&fc->unused_list));
-       req = list_entry(fc->unused_list.next, struct fuse_req, list);
-       list_del_init(&req->list);
-       spin_unlock(&fuse_lock);
-       fuse_request_init(req);
-       req->preallocated = 1;
-       req->in.h.uid = current->fsuid;
-       req->in.h.gid = current->fsgid;
-       req->in.h.pid = current->pid;
-       return req;
-}
-
-/* This can return NULL, but only in case it's interrupted by a SIGKILL */
-struct fuse_req *fuse_get_request(struct fuse_conn *fc)
-{
-       int intr;
        sigset_t oldset;
+       int intr;
+       int err;
 
        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
-       intr = down_interruptible(&fc->outstanding_sem);
+       intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
-       if (intr) {
-               atomic_dec(&fc->num_waiting);
-               return NULL;
-       }
-       return do_get_request(fc);
-}
+       err = -EINTR;
+       if (intr)
+               goto out;
 
-/* Must be called with fuse_lock held */
-static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-       if (req->preallocated) {
-               atomic_dec(&fc->num_waiting);
-               list_add(&req->list, &fc->unused_list);
-       } else
-               fuse_request_free(req);
+       req = fuse_request_alloc();
+       err = -ENOMEM;
+       if (!req)
+               goto out;
+
+       req->in.h.uid = current->fsuid;
+       req->in.h.gid = current->fsgid;
+       req->in.h.pid = current->pid;
+       req->waiting = 1;
+       return req;
 
-       /* If we are in debt decrease that first */
-       if (fc->outstanding_debt)
-               fc->outstanding_debt--;
-       else
-               up(&fc->outstanding_sem);
+ out:
+       atomic_dec(&fc->num_waiting);
+       return ERR_PTR(err);
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count)) {
-               spin_lock(&fuse_lock);
-               fuse_putback_request(fc, req);
-               spin_unlock(&fuse_lock);
+               if (req->waiting)
+                       atomic_dec(&fc->num_waiting);
+               fuse_request_free(req);
        }
 }
 
-static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
-{
-       if (atomic_dec_and_test(&req->count))
-               fuse_putback_request(fc, req);
-}
-
-void fuse_release_background(struct fuse_req *req)
+/*
+ * Called with sbput_sem held for read (request_end) or write
+ * (fuse_put_super).  By the time fuse_put_super() is finished, all
+ * inodes belonging to background requests must be released, so the
+ * iputs have to be done within the locked region.
+ */
+void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
 {
        iput(req->inode);
        iput(req->inode2);
-       if (req->file)
-               fput(req->file);
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        list_del(&req->bg_entry);
-       spin_unlock(&fuse_lock);
+       if (fc->num_background == FUSE_MAX_BACKGROUND) {
+               fc->blocked = 0;
+               wake_up_all(&fc->blocked_waitq);
+       }
+       fc->num_background--;
+       spin_unlock(&fc->lock);
 }
 
 /*
@@ -178,24 +164,29 @@ void fuse_release_background(struct fuse_req *req)
  * interrupted and put in the background, it will return with an error
  * and hence never be reset and reused.
  *
- * Called with fuse_lock, unlocks it
+ * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        if (!req->background) {
+               spin_unlock(&fc->lock);
                wake_up(&req->waitq);
-               fuse_put_request_locked(fc, req);
-               spin_unlock(&fuse_lock);
+               fuse_put_request(fc, req);
        } else {
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
                req->end = NULL;
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
-                       fuse_release_background(req);
+                       fuse_release_background(fc, req);
                up_read(&fc->sbput_sem);
+
+               /* fput must go outside sbput_sem, otherwise it can deadlock */
+               if (req->file)
+                       fput(req->file);
+
                if (end)
                        end(fc, req);
                else
@@ -236,6 +227,9 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
+       fc->num_background++;
+       if (fc->num_background == FUSE_MAX_BACKGROUND)
+               fc->blocked = 1;
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
@@ -244,16 +238,16 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
                get_file(req->file);
 }
 
-/* Called with fuse_lock held.  Releases, and then reacquires it. */
+/* Called with fc->lock held.  Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
        sigset_t oldset;
 
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;
 
@@ -267,9 +261,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
@@ -298,19 +292,14 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-       if (!req->preallocated) {
-               /* If request is not preallocated (either FORGET or
-                  RELEASE), then still decrease outstanding_sem, so
-                  user can't open infinite number of files while not
-                  processing the RELEASE requests.  However for
-                  efficiency do it without blocking, so if down()
-                  would block, just increase the debt instead */
-               if (down_trylock(&fc->outstanding_sem))
-                       fc->outstanding_debt++;
-       }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
+       if (!req->waiting) {
+               req->waiting = 1;
+               atomic_inc(&fc->num_waiting);
+       }
        wake_up(&fc->waitq);
+       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
 /*
@@ -319,7 +308,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 1;
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
@@ -332,15 +321,16 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 
                request_wait_answer(fc, req);
        }
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
 }
 
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
+       background_request(fc, req);
        if (fc->connected) {
                queue_request(fc, req);
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
@@ -356,9 +346,6 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 1;
-       spin_lock(&fuse_lock);
-       background_request(fc, req);
-       spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
 }
 
@@ -367,16 +354,16 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
  * anything that could cause a page-fault.  If the request was already
  * interrupted bail out.
  */
-static int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        int err = 0;
        if (req) {
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
        }
        return err;
 }
@@ -386,18 +373,19 @@ static int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (req) {
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
        }
 }
 
 struct fuse_copy_state {
+       struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
@@ -410,11 +398,12 @@ struct fuse_copy_state {
        unsigned len;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
-                          struct fuse_req *req, const struct iovec *iov,
-                          unsigned long nr_segs)
+static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+                          int write, struct fuse_req *req,
+                          const struct iovec *iov, unsigned long nr_segs)
 {
        memset(cs, 0, sizeof(*cs));
+       cs->fc = fc;
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
@@ -444,7 +433,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
        unsigned long offset;
        int err;
 
-       unlock_request(cs->req);
+       unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
@@ -467,7 +456,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
        cs->seglen -= cs->len;
        cs->addr += cs->len;
 
-       return lock_request(cs->req);
+       return lock_request(cs->fc, cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -579,9 +568,9 @@ static void request_wait(struct fuse_conn *fc)
                if (signal_pending(current))
                        break;
 
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
                schedule();
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
@@ -600,18 +589,21 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
 {
        int err;
-       struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
+       struct fuse_conn *fc = fuse_get_conn(file);
+       if (!fc)
+               return -EPERM;
 
  restart:
-       spin_lock(&fuse_lock);
-       fc = file->private_data;
-       err = -EPERM;
-       if (!fc)
+       spin_lock(&fc->lock);
+       err = -EAGAIN;
+       if ((file->f_flags & O_NONBLOCK) && fc->connected &&
+           list_empty(&fc->pending))
                goto err_unlock;
+
        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
@@ -635,14 +627,14 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                request_end(fc, req);
                goto restart;
        }
-       spin_unlock(&fuse_lock);
-       fuse_copy_init(&cs, 1, req, iov, nr_segs);
+       spin_unlock(&fc->lock);
+       fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
@@ -657,12 +649,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
        }
        return reqsize;
 
  err_unlock:
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
        return err;
 }
 
@@ -729,9 +721,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
-               return -ENODEV;
+               return -EPERM;
 
-       fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
+       fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;
 
@@ -743,7 +735,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
            oh.len != nbytes)
                goto err_finish;
 
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;
@@ -754,9 +746,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                goto err_unlock;
 
        if (req->interrupted) {
-               spin_unlock(&fuse_lock);
+               spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
@@ -764,12 +756,12 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
 
        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);
 
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
@@ -781,7 +773,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
        return err ? err : nbytes;
 
  err_unlock:
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
  err_finish:
        fuse_copy_finish(&cs);
        return err;
@@ -798,18 +790,19 @@ static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
 
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
-       struct fuse_conn *fc = fuse_get_conn(file);
        unsigned mask = POLLOUT | POLLWRNORM;
-
+       struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
-               return -ENODEV;
+               return POLLERR;
 
        poll_wait(file, &fc->waitq, wait);
 
-       spin_lock(&fuse_lock);
-       if (!list_empty(&fc->pending))
-                mask |= POLLIN | POLLRDNORM;
-       spin_unlock(&fuse_lock);
+       spin_lock(&fc->lock);
+       if (!fc->connected)
+               mask = POLLERR;
+       else if (!list_empty(&fc->pending))
+               mask |= POLLIN | POLLRDNORM;
+       spin_unlock(&fc->lock);
 
        return mask;
 }
@@ -817,7 +810,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 /*
  * Abort all requests on the given list (pending or processing)
  *
- * This function releases and reacquires fuse_lock
+ * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
@@ -826,7 +819,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
-               spin_lock(&fuse_lock);
+               spin_lock(&fc->lock);
        }
 }
 
@@ -857,10 +850,10 @@ static void end_io_requests(struct fuse_conn *fc)
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
-                       spin_unlock(&fuse_lock);
+                       spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
-                       spin_lock(&fuse_lock);
+                       spin_lock(&fc->lock);
                }
        }
 }
@@ -887,36 +880,45 @@ static void end_io_requests(struct fuse_conn *fc)
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
-       spin_lock(&fuse_lock);
+       spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
+               kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
-       spin_unlock(&fuse_lock);
+       spin_unlock(&fc->lock);
 }
 
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
-       struct fuse_conn *fc;
-
-       spin_lock(&fuse_lock);
-       fc = file->private_data;
+       struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
+               spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
-       }
-       spin_unlock(&fuse_lock);
-       if (fc)
+               spin_unlock(&fc->lock);
+               fasync_helper(-1, file, 0, &fc->fasync);
                kobject_put(&fc->kobj);
+       }
 
        return 0;
 }
 
-struct file_operations fuse_dev_operations = {
+static int fuse_dev_fasync(int fd, struct file *file, int on)
+{
+       struct fuse_conn *fc = fuse_get_conn(file);
+       if (!fc)
+               return -EPERM;
+
+       /* No locking - fasync_helper does its own locking */
+       return fasync_helper(fd, file, on, &fc->fasync);
+}
+
+const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = fuse_dev_read,
@@ -925,6 +927,7 @@ struct file_operations fuse_dev_operations = {
        .writev         = fuse_dev_writev,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
+       .fasync         = fuse_dev_fasync,
 };
 
 static struct miscdevice fuse_miscdevice = {
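Illustrative note, not part of the patch: the hunks above add SIGIO notification for /dev/fuse (fuse_dev_fasync() plus kill_fasync() in queue_request() and fuse_abort_conn()) and an -EAGAIN path for O_NONBLOCK readers. A minimal userspace sketch of how a FUSE daemon might use this, assuming "fd" is an already-opened /dev/fuse descriptor and omitting the mount handshake:

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t request_pending;

static void sigio_handler(int sig)
{
	(void)sig;
	request_pending = 1;	/* kernel ran kill_fasync(..., SIGIO, POLL_IN) */
}

/* Ask for SIGIO on new requests and switch the fd to non-blocking reads. */
static int setup_async(int fd)
{
	struct sigaction sa = { .sa_handler = sigio_handler };
	int flags;

	if (sigaction(SIGIO, &sa, NULL) == -1)
		return -1;
	if (fcntl(fd, F_SETOWN, getpid()) == -1)	/* route SIGIO to us */
		return -1;
	flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return -1;
	/* Setting O_ASYNC invokes fuse_dev_fasync() via fasync_helper(). */
	return fcntl(fd, F_SETFL, flags | O_ASYNC | O_NONBLOCK);
}

/* Returns 0 when the pending queue is empty (the new -EAGAIN path). */
static ssize_t try_read_request(int fd, void *buf, size_t len)
{
	ssize_t n = read(fd, buf, len);

	if (n == -1 && errno == EAGAIN)
		return 0;
	return n;
}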