From bc0fb201b34b12e2d16e8cbd5bb078c1db936304 Mon Sep 17 00:00:00 2001
From: Chuck Lever
Date: Mon, 20 Mar 2006 13:44:31 -0500
Subject: [PATCH] NFS: create common routine for waiting for direct I/O to complete

We're about to add asynchrony to the NFS direct write path. Begin by
abstracting out the common pieces in the read path.

The first piece is nfs_direct_read_wait, which works the same whether
the process is waiting for a read or a write.

Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.

Signed-off-by: Chuck Lever
Signed-off-by: Trond Myklebust
---
 fs/nfs/direct.c | 57 ++++++++++++++++++++++++++-------------------------------
 1 file changed, 26 insertions(+), 31 deletions(-)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 094456c3df90..2593f47eaff0 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -158,6 +158,30 @@ static void nfs_direct_req_release(struct kref *kref)
 	kmem_cache_free(nfs_direct_cachep, dreq);
 }
 
+/*
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
+
+	result = wait_event_interruptible(dreq->wait,
+				(atomic_read(&dreq->complete) == 0));
+
+	if (!result)
+		result = atomic_read(&dreq->error);
+	if (!result)
+		result = atomic_read(&dreq->count);
+
+out:
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
 /*
  * Note we also set the number of requests we have in the dreq when we are
  * done. This prevents races with I/O completion so we will always wait
@@ -213,7 +237,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 /*
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  *
  * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
  * can't trust the iocb is still valid here if this is a synchronous
@@ -315,35 +339,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 	} while (count != 0);
 }
 
-/*
- * Collects and returns the final error value/byte-count.
- */
-static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
-{
-	int result = -EIOCBQUEUED;
-
-	/* Async requests don't wait here */
-	if (dreq->iocb)
-		goto out;
-
-	result = 0;
-	if (intr) {
-		result = wait_event_interruptible(dreq->wait,
-					(atomic_read(&dreq->complete) == 0));
-	} else {
-		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
-	}
-
-	if (!result)
-		result = atomic_read(&dreq->error);
-	if (!result)
-		result = atomic_read(&dreq->count);
-
-out:
-	kref_put(&dreq->kref, nfs_direct_req_release);
-	return (ssize_t) result;
-}
-
 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
 {
 	ssize_t result;
@@ -366,7 +361,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
 	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
-	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
-- 
2.39.5