/*
FUSE: Filesystem in Userspace
- Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
+ Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
static kmem_cache_t *fuse_req_cachep;
-static inline struct fuse_conn *fuse_get_conn(struct file *file)
+static struct fuse_conn *fuse_get_conn(struct file *file)
{
- struct fuse_conn *fc;
- spin_lock(&fuse_lock);
- fc = file->private_data;
- if (fc && !fc->mounted)
- fc = NULL;
- spin_unlock(&fuse_lock);
- return fc;
+ /*
+ * Lockless access is OK, because file->private_data is set
+ * once during mount and is valid until the file is released.
+ */
+ return file->private_data;
}
-static inline void fuse_request_init(struct fuse_req *req)
+static void fuse_request_init(struct fuse_req *req)
{
memset(req, 0, sizeof(*req));
INIT_LIST_HEAD(&req->list);
kmem_cache_free(fuse_req_cachep, req);
}
-static inline void block_sigs(sigset_t *oldset)
+static void block_sigs(sigset_t *oldset)
{
sigset_t mask;
sigprocmask(SIG_BLOCK, &mask, oldset);
}
-static inline void restore_sigs(sigset_t *oldset)
+static void restore_sigs(sigset_t *oldset)
{
sigprocmask(SIG_SETMASK, oldset, NULL);
}
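/*
 * Editor's aside, not part of the patch: block_sigs() relies on
 * siginitsetinv(&mask, sigmask(SIGKILL)) (in the elided body above)
 * so that the interruptible waits below can only be woken by SIGKILL.
 * A minimal userspace sketch of the same save/block/restore pattern,
 * using the POSIX signal API:
 */
#include <signal.h>

static void block_sigs_sketch(sigset_t *oldset)
{
	sigset_t mask;

	sigfillset(&mask);         /* block everything...              */
	sigdelset(&mask, SIGKILL); /* ...except the unblockable signal */
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs_sketch(const sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}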
+/*
+ * Reset request, so that it can be reused
+ *
+ * The caller must be _very_ careful to make sure that it is holding
+ * the only reference to req
+ */
void fuse_reset_request(struct fuse_req *req)
{
- int preallocated = req->preallocated;
BUG_ON(atomic_read(&req->count) != 1);
fuse_request_init(req);
- req->preallocated = preallocated;
}
static void __fuse_get_request(struct fuse_req *req)
atomic_dec(&req->count);
}
-static struct fuse_req *do_get_request(struct fuse_conn *fc)
+struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
struct fuse_req *req;
-
- spin_lock(&fuse_lock);
- BUG_ON(list_empty(&fc->unused_list));
- req = list_entry(fc->unused_list.next, struct fuse_req, list);
- list_del_init(&req->list);
- spin_unlock(&fuse_lock);
- fuse_request_init(req);
- req->preallocated = 1;
- req->in.h.uid = current->fsuid;
- req->in.h.gid = current->fsgid;
- req->in.h.pid = current->pid;
- return req;
-}
-
-/* This can return NULL, but only in case it's interrupted by a SIGKILL */
-struct fuse_req *fuse_get_request(struct fuse_conn *fc)
-{
- int intr;
sigset_t oldset;
+ int intr;
+ int err;
+ atomic_inc(&fc->num_waiting);
block_sigs(&oldset);
- intr = down_interruptible(&fc->outstanding_sem);
+ intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
restore_sigs(&oldset);
- return intr ? NULL : do_get_request(fc);
-}
+ err = -EINTR;
+ if (intr)
+ goto out;
-static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
-{
- spin_lock(&fuse_lock);
- if (req->preallocated)
- list_add(&req->list, &fc->unused_list);
- else
- fuse_request_free(req);
+ req = fuse_request_alloc();
+ err = -ENOMEM;
+ if (!req)
+ goto out;
+
+ req->in.h.uid = current->fsuid;
+ req->in.h.gid = current->fsgid;
+ req->in.h.pid = current->pid;
+ req->waiting = 1;
+ return req;
- /* If we are in debt decrease that first */
- if (fc->outstanding_debt)
- fc->outstanding_debt--;
- else
- up(&fc->outstanding_sem);
- spin_unlock(&fuse_lock);
+ out:
+ atomic_dec(&fc->num_waiting);
+ return ERR_PTR(err);
}
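/*
 * Editor's aside, not part of the patch: with the ERR_PTR() return
 * above, fuse_get_req() no longer returns NULL on failure, so every
 * caller (converted in companion patches elsewhere in fs/fuse) must
 * switch from a NULL check to the IS_ERR()/PTR_ERR() pattern, along
 * these lines:
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	fuse_put_request(fc, req);
 */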
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
- if (atomic_dec_and_test(&req->count))
- fuse_putback_request(fc, req);
+ if (atomic_dec_and_test(&req->count)) {
+ if (req->waiting)
+ atomic_dec(&fc->num_waiting);
+ fuse_request_free(req);
+ }
}
-void fuse_release_background(struct fuse_req *req)
+/*
+ * Called with sbput_sem held for read (request_end) or write
+ * (fuse_put_super). By the time fuse_put_super() is finished, all
+ * inodes belonging to background requests must be released, so the
+ * iputs have to be done within the locked region.
+ */
+void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
iput(req->inode);
iput(req->inode2);
- if (req->file)
- fput(req->file);
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
list_del(&req->bg_entry);
- spin_unlock(&fuse_lock);
+ if (fc->num_background == FUSE_MAX_BACKGROUND) {
+ fc->blocked = 0;
+ wake_up_all(&fc->blocked_waitq);
+ }
+ fc->num_background--;
+ spin_unlock(&fc->lock);
}
/*
* This function is called when a request is finished. Either a reply
* has arrived or it was interrupted (and not yet sent) or some error
- * occurred during communication with userspace, or the device file was
- * closed. It decreases the reference count for the request. In case
- * of a background request the reference to the stored objects are
- * released. The requester thread is woken up (if still waiting), and
- * finally the request is either freed or put on the unused_list
+ * occurred during communication with userspace, or the device file
+ * was closed. In case of a background request the references to the
+ * stored objects are released. The requester thread is woken up (if
+ * still waiting), the 'end' callback is called if given, else the
+ * reference to the request is released
*
- * Called with fuse_lock, unlocks it
+ * Releasing extra reference for foreground requests must be done
+ * within the same locked region as setting state to finished. This
+ * is because fuse_reset_request() may be called after the request is
+ * finished and it must be the sole possessor. If the request is
+ * interrupted and put in the background, it will return with an error
+ * and hence never be reset and reused.
+ *
+ * Called with fc->lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
- int putback;
- req->finished = 1;
- putback = atomic_dec_and_test(&req->count);
- spin_unlock(&fuse_lock);
- if (req->background) {
+ list_del(&req->list);
+ req->state = FUSE_REQ_FINISHED;
+ if (!req->background) {
+ spin_unlock(&fc->lock);
+ wake_up(&req->waitq);
+ fuse_put_request(fc, req);
+ } else {
+ void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+ req->end = NULL;
+ spin_unlock(&fc->lock);
down_read(&fc->sbput_sem);
if (fc->mounted)
- fuse_release_background(req);
+ fuse_release_background(fc, req);
up_read(&fc->sbput_sem);
+
+ /* fput must go outside sbput_sem, otherwise it can deadlock */
+ if (req->file)
+ fput(req->file);
+
+ if (end)
+ end(fc, req);
+ else
+ fuse_put_request(fc, req);
}
- wake_up(&req->waitq);
- if (req->in.h.opcode == FUSE_INIT) {
- int i;
-
- if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
- fc->conn_error = 1;
-
- /* After INIT reply is received other requests can go
- out. So do (FUSE_MAX_OUTSTANDING - 1) number of
- up()s on outstanding_sem. The last up() is done in
- fuse_putback_request() */
- for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
- up(&fc->outstanding_sem);
- }
- if (putback)
- fuse_putback_request(fc, req);
}
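/*
 * Editor's sketch of the req->end contract introduced above (the
 * callback name is hypothetical; real users arrive in later patches).
 * The callback is invoked exactly once, from request_end() or
 * end_io_requests(), and owns the final reference:
 *
 *	static void example_end(struct fuse_conn *fc, struct fuse_req *req)
 *	{
 *		... inspect req->out.h.error, tear down private state ...
 *		fuse_put_request(fc, req);
 *	}
 */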
/*
{
req->background = 1;
list_add(&req->bg_entry, &fc->background);
+ fc->num_background++;
+ if (fc->num_background == FUSE_MAX_BACKGROUND)
+ fc->blocked = 1;
if (req->inode)
req->inode = igrab(req->inode);
if (req->inode2)
get_file(req->file);
}
-/* Called with fuse_lock held. Releases, and then reacquires it. */
+/* Called with fc->lock held. Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
sigset_t oldset;
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
block_sigs(&oldset);
- wait_event_interruptible(req->waitq, req->finished);
+ wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
restore_sigs(&oldset);
- spin_lock(&fuse_lock);
- if (req->finished)
+ spin_lock(&fc->lock);
+ if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
return;
- req->out.h.error = -EINTR;
- req->interrupted = 1;
+ if (!req->interrupted) {
+ req->out.h.error = -EINTR;
+ req->interrupted = 1;
+ }
if (req->locked) {
/* This is uninterruptible sleep, because data is
being copied to/from the buffers of req. During
locked state, there mustn't be any filesystem
operation (e.g. page fault), since that could lead
to deadlock */
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
}
- if (!req->sent && !list_empty(&req->list)) {
+ if (req->state == FUSE_REQ_PENDING) {
list_del(&req->list);
__fuse_put_request(req);
- } else if (!req->finished && req->sent)
+ } else if (req->state == FUSE_REQ_SENT)
background_request(fc, req);
}
req->in.h.unique = fc->reqctr;
req->in.h.len = sizeof(struct fuse_in_header) +
len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
- if (!req->preallocated) {
- /* If request is not preallocated (either FORGET or
- RELEASE), then still decrease outstanding_sem, so
- user can't open infinite number of files while not
- processing the RELEASE requests. However for
- efficiency do it without blocking, so if down()
- would block, just increase the debt instead */
- if (down_trylock(&fc->outstanding_sem))
- fc->outstanding_debt++;
- }
list_add_tail(&req->list, &fc->pending);
+ req->state = FUSE_REQ_PENDING;
+ if (!req->waiting) {
+ req->waiting = 1;
+ atomic_inc(&fc->num_waiting);
+ }
wake_up(&fc->waitq);
+ kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
/*
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 1;
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
if (!fc->connected)
req->out.h.error = -ENOTCONN;
else if (fc->conn_error)
request_wait_answer(fc, req);
}
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
+ background_request(fc, req);
if (fc->connected) {
queue_request(fc, req);
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
} else {
req->out.h.error = -ENOTCONN;
request_end(fc, req);
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 1;
- spin_lock(&fuse_lock);
- background_request(fc, req);
- spin_unlock(&fuse_lock);
request_send_nowait(fc, req);
}
-void fuse_send_init(struct fuse_conn *fc)
-{
- /* This is called from fuse_read_super() so there's guaranteed
- to be a request available */
- struct fuse_req *req = do_get_request(fc);
- struct fuse_init_in_out *arg = &req->misc.init_in_out;
- arg->major = FUSE_KERNEL_VERSION;
- arg->minor = FUSE_KERNEL_MINOR_VERSION;
- req->in.h.opcode = FUSE_INIT;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*arg);
- req->in.args[0].value = arg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(*arg);
- req->out.args[0].value = arg;
- request_send_background(fc, req);
-}
-
/*
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
* interrupted bail out.
*/
-static inline int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
int err = 0;
if (req) {
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
if (req->interrupted)
err = -ENOENT;
else
req->locked = 1;
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
}
return err;
}
* requester thread is currently waiting for it to be unlocked, so
* wake it up.
*/
-static inline void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (req) {
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
req->locked = 0;
if (req->interrupted)
wake_up(&req->waitq);
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
}
}
struct fuse_copy_state {
+ struct fuse_conn *fc;
int write;
struct fuse_req *req;
const struct iovec *iov;
unsigned len;
};
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
- struct fuse_req *req, const struct iovec *iov,
- unsigned long nr_segs)
+static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+ int write, struct fuse_req *req,
+ const struct iovec *iov, unsigned long nr_segs)
{
memset(cs, 0, sizeof(*cs));
+ cs->fc = fc;
cs->write = write;
cs->req = req;
cs->iov = iov;
}
/* Unmap and put previous page of userspace buffer */
-static inline void fuse_copy_finish(struct fuse_copy_state *cs)
+static void fuse_copy_finish(struct fuse_copy_state *cs)
{
if (cs->mapaddr) {
kunmap_atomic(cs->mapaddr, KM_USER0);
unsigned long offset;
int err;
- unlock_request(cs->req);
+ unlock_request(cs->fc, cs->req);
fuse_copy_finish(cs);
if (!cs->seglen) {
BUG_ON(!cs->nr_segs);
cs->seglen -= cs->len;
cs->addr += cs->len;
- return lock_request(cs->req);
+ return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
-static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
- unsigned *size)
+static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
if (val) {
* Copy a page in the request to/from the userspace buffer. Must be
* done atomically
*/
-static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
- unsigned offset, unsigned count, int zeroing)
+static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
+ unsigned offset, unsigned count, int zeroing)
{
if (page && zeroing && count < PAGE_SIZE) {
void *mapaddr = kmap_atomic(page, KM_USER1);
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&fc->waitq, &wait);
- while (fc->mounted && list_empty(&fc->pending)) {
+ while (fc->connected && list_empty(&fc->pending)) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
break;
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
schedule();
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&fc->waitq, &wait);
unsigned long nr_segs, loff_t *off)
{
int err;
- struct fuse_conn *fc;
struct fuse_req *req;
struct fuse_in *in;
struct fuse_copy_state cs;
unsigned reqsize;
-
- spin_lock(&fuse_lock);
- fc = file->private_data;
- err = -EPERM;
+ struct fuse_conn *fc = fuse_get_conn(file);
if (!fc)
+ return -EPERM;
+
+ restart:
+ spin_lock(&fc->lock);
+ err = -EAGAIN;
+ if ((file->f_flags & O_NONBLOCK) && fc->connected &&
+ list_empty(&fc->pending))
goto err_unlock;
+
request_wait(fc);
err = -ENODEV;
- if (!fc->mounted)
+ if (!fc->connected)
goto err_unlock;
err = -ERESTARTSYS;
if (list_empty(&fc->pending))
goto err_unlock;
req = list_entry(fc->pending.next, struct fuse_req, list);
- list_del_init(&req->list);
- spin_unlock(&fuse_lock);
+ req->state = FUSE_REQ_READING;
+ list_move(&req->list, &fc->io);
in = &req->in;
- reqsize = req->in.h.len;
- fuse_copy_init(&cs, 1, req, iov, nr_segs);
- err = -EINVAL;
- if (iov_length(iov, nr_segs) >= reqsize) {
- err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
- if (!err)
- err = fuse_copy_args(&cs, in->numargs, in->argpages,
- (struct fuse_arg *) in->args, 0);
+ reqsize = in->h.len;
+ /* If request is too large, reply with an error and restart the read */
+ if (iov_length(iov, nr_segs) < reqsize) {
+ req->out.h.error = -EIO;
+ /* SETXATTR is special, since its data may legitimately be too large */
+ if (in->h.opcode == FUSE_SETXATTR)
+ req->out.h.error = -E2BIG;
+ request_end(fc, req);
+ goto restart;
}
+ spin_unlock(&fc->lock);
+ fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
+ err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
+ if (!err)
+ err = fuse_copy_args(&cs, in->numargs, in->argpages,
+ (struct fuse_arg *) in->args, 0);
fuse_copy_finish(&cs);
-
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
req->locked = 0;
if (!err && req->interrupted)
err = -ENOENT;
if (!req->isreply)
request_end(fc, req);
else {
- req->sent = 1;
- list_add_tail(&req->list, &fc->processing);
- spin_unlock(&fuse_lock);
+ req->state = FUSE_REQ_SENT;
+ list_move_tail(&req->list, &fc->processing);
+ spin_unlock(&fc->lock);
}
return reqsize;
err_unlock:
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
return err;
}
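/*
 * Editor's aside, not part of the patch: the O_NONBLOCK branch added
 * above makes an empty pending queue fail the read with -EAGAIN
 * instead of sleeping. A userspace daemon that opened /dev/fuse with
 * O_NONBLOCK (the 'fuse_fd' name below is illustrative) might drain
 * requests like this:
 */
#include <errno.h>
#include <unistd.h>

static ssize_t try_read_request(int fuse_fd, void *buf, size_t bufsize)
{
	ssize_t n = read(fuse_fd, buf, bufsize);

	if (n < 0 && errno == EAGAIN)
		return 0;	/* no request queued; retry later  */
	return n;		/* request bytes, or a real error  */
}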
struct fuse_copy_state cs;
struct fuse_conn *fc = fuse_get_conn(file);
if (!fc)
- return -ENODEV;
+ return -EPERM;
- fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
+ fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
if (nbytes < sizeof(struct fuse_out_header))
return -EINVAL;
oh.len != nbytes)
goto err_finish;
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
+ err = -ENOENT;
+ if (!fc->connected)
+ goto err_unlock;
+
req = request_find(fc, oh.unique);
err = -EINVAL;
if (!req)
goto err_unlock;
- list_del_init(&req->list);
if (req->interrupted) {
- request_end(fc, req);
+ spin_unlock(&fc->lock);
fuse_copy_finish(&cs);
+ spin_lock(&fc->lock);
+ request_end(fc, req);
return -ENOENT;
}
+ list_move(&req->list, &fc->io);
req->out.h = oh;
req->locked = 1;
cs.req = req;
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
err = copy_out_args(&cs, &req->out, nbytes);
fuse_copy_finish(&cs);
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
req->locked = 0;
if (!err) {
if (req->interrupted)
return err ? err : nbytes;
err_unlock:
- spin_unlock(&fuse_lock);
+ spin_unlock(&fc->lock);
err_finish:
fuse_copy_finish(&cs);
return err;
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
- struct fuse_conn *fc = fuse_get_conn(file);
unsigned mask = POLLOUT | POLLWRNORM;
-
+ struct fuse_conn *fc = fuse_get_conn(file);
if (!fc)
- return -ENODEV;
+ return POLLERR;
poll_wait(file, &fc->waitq, wait);
- spin_lock(&fuse_lock);
- if (!list_empty(&fc->pending))
- mask |= POLLIN | POLLRDNORM;
- spin_unlock(&fuse_lock);
+ spin_lock(&fc->lock);
+ if (!fc->connected)
+ mask = POLLERR;
+ else if (!list_empty(&fc->pending))
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock(&fc->lock);
return mask;
}
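/*
 * Editor's aside, not part of the patch: together with the EAGAIN
 * change in the read path, the poll support above lets a daemon
 * multiplex /dev/fuse with other descriptors. POLLIN means a request
 * is queued; POLLERR is reported once the connection is dead:
 */
#include <poll.h>

static int wait_for_request(int fuse_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return 0;		/* timeout or error            */
	if (pfd.revents & POLLERR)
		return -1;		/* connection aborted/released */
	return pfd.revents & POLLIN;	/* a request is ready          */
}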
-/* Abort all requests on the given list (pending or processing) */
+/*
+ * Abort all requests on the given list (pending or processing)
+ *
+ * This function releases and reacquires fc->lock
+ */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
while (!list_empty(head)) {
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
- list_del_init(&req->list);
req->out.h.error = -ECONNABORTED;
request_end(fc, req);
- spin_lock(&fuse_lock);
+ spin_lock(&fc->lock);
}
}
-static int fuse_dev_release(struct inode *inode, struct file *file)
+/*
+ * Abort requests under I/O
+ *
+ * The requests are set to interrupted and finished, and the request
+ * waiter is woken up. This will make request_wait_answer() wait
+ * until the request is unlocked and then return.
+ *
+ * If the request is asynchronous, then the end function needs to be
+ * called after waiting for the request to be unlocked (if it was
+ * locked).
+ */
+static void end_io_requests(struct fuse_conn *fc)
{
- struct fuse_conn *fc;
+ while (!list_empty(&fc->io)) {
+ struct fuse_req *req =
+ list_entry(fc->io.next, struct fuse_req, list);
+ void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+
+ req->interrupted = 1;
+ req->out.h.error = -ECONNABORTED;
+ req->state = FUSE_REQ_FINISHED;
+ list_del_init(&req->list);
+ wake_up(&req->waitq);
+ if (end) {
+ req->end = NULL;
+ /* The end function will consume this reference */
+ __fuse_get_request(req);
+ spin_unlock(&fc->lock);
+ wait_event(req->waitq, !req->locked);
+ end(fc, req);
+ spin_lock(&fc->lock);
+ }
+ }
+}
- spin_lock(&fuse_lock);
- fc = file->private_data;
+/*
+ * Abort all requests.
+ *
+ * Emergency exit in case of a malicious or accidental deadlock, or
+ * just a hung filesystem.
+ *
+ * The same effect is usually achievable through killing the
+ * filesystem daemon and all users of the filesystem. The exception
+ * is the combination of an asynchronous request and the tricky
+ * deadlock (see Documentation/filesystems/fuse.txt).
+ *
+ * During the aborting, progression of requests from the pending and
+ * processing lists onto the io list, and progression of new requests
+ * onto the pending list are prevented by fc->connected being false.
+ *
+ * Progression of requests under I/O to the processing list is
+ * prevented by the req->interrupted flag being true for these
+ * requests. For this reason requests on the io list must be aborted
+ * first.
+ */
+void fuse_abort_conn(struct fuse_conn *fc)
+{
+ spin_lock(&fc->lock);
+ if (fc->connected) {
+ fc->connected = 0;
+ end_io_requests(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+ wake_up_all(&fc->waitq);
+ kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+ }
+ spin_unlock(&fc->lock);
+}
+
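/*
 * Editor's aside, not part of the patch: fuse_abort_conn() is meant to
 * be triggered from userspace through a sysfs attribute added in a
 * companion patch (the path below is an assumption based on that
 * series). Writing to it forcibly aborts a hung filesystem:
 */
#include <fcntl.h>
#include <unistd.h>

/* abort_path is e.g. "/sys/fs/fuse/connections/<ID>/abort" (assumed) */
static int abort_fuse_conn_sketch(const char *abort_path)
{
	int fd = open(abort_path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}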
+static int fuse_dev_release(struct inode *inode, struct file *file)
+{
+ struct fuse_conn *fc = fuse_get_conn(file);
if (fc) {
+ spin_lock(&fc->lock);
fc->connected = 0;
end_requests(fc, &fc->pending);
end_requests(fc, &fc->processing);
- fuse_release_conn(fc);
+ spin_unlock(&fc->lock);
+ fasync_helper(-1, file, 0, &fc->fasync);
+ kobject_put(&fc->kobj);
}
- spin_unlock(&fuse_lock);
+
return 0;
}
-struct file_operations fuse_dev_operations = {
+static int fuse_dev_fasync(int fd, struct file *file, int on)
+{
+ struct fuse_conn *fc = fuse_get_conn(file);
+ if (!fc)
+ return -EPERM;
+
+ /* No locking - fasync_helper does its own locking */
+ return fasync_helper(fd, file, on, &fc->fasync);
+}
+
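/*
 * Editor's aside, not part of the patch: with ->fasync wired up and
 * queue_request() calling kill_fasync(), a daemon may ask for SIGIO
 * instead of blocking in read() or poll(). Typical setup on the
 * /dev/fuse descriptor:
 */
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int fuse_fd)
{
	/* direct SIGIO to this process */
	if (fcntl(fuse_fd, F_SETOWN, getpid()) < 0)
		return -1;
	/* setting O_ASYNC invokes fuse_dev_fasync() via fasync_helper() */
	return fcntl(fuse_fd, F_SETFL,
		     fcntl(fuse_fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
}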
+const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = fuse_dev_read,
.writev = fuse_dev_writev,
.poll = fuse_dev_poll,
.release = fuse_dev_release,
+ .fasync = fuse_dev_fasync,
};
static struct miscdevice fuse_miscdevice = {