From efc91ed0191e3fc62bb1c556ac93fc4e661214d2 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 10 Jun 2008 18:31:00 -0400
Subject: [PATCH] NFS: Optimise append writes with holes

If a file is being extended, and we're creating a hole, we might as well
declare the entire page to be up to date.

This patch significantly improves the write performance for sparse files
in the case where lseek(SEEK_END) is used to append several
non-contiguous writes at intervals of < PAGE_SIZE.

Signed-off-by: Trond Myklebust
---
 fs/nfs/file.c  | 20 ++++++++++++++++++++
 fs/nfs/write.c | 12 +++---------
 2 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 7c73f06692b6..7ac89a845a5e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -344,6 +344,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
 	int status;
 
+	/*
+	 * Zero any uninitialised parts of the page, and then mark the page
+	 * as up to date if it turns out that we're extending the file.
+	 */
+	if (!PageUptodate(page)) {
+		unsigned pglen = nfs_page_length(page);
+		unsigned end = offset + len;
+
+		if (pglen == 0) {
+			zero_user_segments(page, 0, offset,
+					end, PAGE_CACHE_SIZE);
+			SetPageUptodate(page);
+		} else if (end >= pglen) {
+			zero_user_segment(page, end, PAGE_CACHE_SIZE);
+			if (offset == 0)
+				SetPageUptodate(page);
+		} else
+			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+	}
+
 	lock_kernel();
 	status = nfs_updatepage(file, page, offset, copied);
 	unlock_kernel();
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index dc62bc504693..eea2d2b5278c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -616,7 +616,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		spin_unlock(&inode->i_lock);
 		radix_tree_preload_end();
 		req = new;
-		goto zero_page;
+		goto out;
 	}
 	spin_unlock(&inode->i_lock);
 
@@ -649,19 +649,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
 		req->wb_bytes = max(end, rqend) - req->wb_offset;
-		goto zero_page;
+		goto out;
 	}
 
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
 
-	return req;
-zero_page:
-	/* If this page might potentially be marked as up to date,
-	 * then we need to zero any uninitalised data. */
-	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
-			&& !PageUptodate(req->wb_page))
-		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
+out:
 	return req;
 }
 
-- 
2.39.5
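
For context, the workload the changelog describes can be approximated by a
small userspace sketch. It is not part of the patch, and the file name, hole
size and write size below are arbitrary; each iteration seeks past the
current end of file, leaving a hole smaller than PAGE_SIZE, and then appends
a small write.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[512] = { 0 };
	int fd = open("sparse.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	int i;

	if (fd < 0)
		return 1;
	for (i = 0; i < 1024; i++) {
		/* skip past EOF so each write leaves a hole < PAGE_SIZE */
		lseek(fd, 1024, SEEK_END);
		write(fd, buf, sizeof(buf));
	}
	close(fd);
	return 0;
}

Every write here extends the file and creates a hole in the same page, which
is the case the new code in nfs_write_end() handles by zeroing the
uninitialised parts of the page and, where possible, marking the page up to
date.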