Merge tag 'nfs-for-3.16-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 18ee4e99347e332560bc29490f1e1c3a096d79df..b6ee3a6ee96dd2b06df61a022fadc0841da8d0b4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -100,7 +100,7 @@ nfs_iocounter_dec(struct nfs_io_counter *c)
 {
        if (atomic_dec_and_test(&c->io_count)) {
                clear_bit(NFS_IO_INPROGRESS, &c->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
        }
 }
@@ -138,6 +138,12 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
        return __nfs_iocounter_wait(c);
 }
 
+static int nfs_wait_bit_uninterruptible(void *word)
+{
+       io_schedule();
+       return 0;
+}
+
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
@@ -148,13 +154,12 @@ void
 nfs_page_group_lock(struct nfs_page *req)
 {
        struct nfs_page *head = req->wb_head;
-       int err = -EAGAIN;
 
        WARN_ON_ONCE(head != head->wb_head);
 
-       while (err)
-               err = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-                       nfs_wait_bit_killable, TASK_KILLABLE);
+       wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+                       nfs_wait_bit_uninterruptible,
+                       TASK_UNINTERRUPTIBLE);
 }
 
 /*
@@ -168,9 +173,9 @@ nfs_page_group_unlock(struct nfs_page *req)
 
        WARN_ON_ONCE(head != head->wb_head);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(PG_HEADLOCK, &head->wb_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&head->wb_flags, PG_HEADLOCK);
 }
 
@@ -339,9 +344,9 @@ void nfs_unlock_request(struct nfs_page *req)
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(PG_BUSY, &req->wb_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&req->wb_flags, PG_BUSY);
 }
 
@@ -397,6 +402,8 @@ static void nfs_free_request(struct nfs_page *req)
        WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
        WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 
        /* Release struct file and open context */
        nfs_clear_request(req);
@@ -408,12 +415,6 @@ void nfs_release_request(struct nfs_page *req)
        kref_put(&req->wb_kref, nfs_page_group_destroy);
 }
 
-static int nfs_wait_bit_uninterruptible(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 /**
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
@@ -441,21 +442,13 @@ nfs_wait_on_request(struct nfs_page *req)
 size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *prev, struct nfs_page *req)
 {
-       if (!prev)
-               return req->wb_bytes;
-       /*
-        * FIXME: ideally we should be able to coalesce all requests
-        * that are not block boundary aligned, but currently this
-        * is problematic for the case of bsize < PAGE_CACHE_SIZE,
-        * since nfs_flush_multi and nfs_pagein_multi assume you
-        * can have only one struct nfs_page.
-        */
-       if (desc->pg_bsize < PAGE_SIZE)
+       if (desc->pg_count > desc->pg_bsize) {
+               /* should never happen */
+               WARN_ON_ONCE(1);
                return 0;
+       }
 
-       if (desc->pg_count + req->wb_bytes <= desc->pg_bsize)
-               return req->wb_bytes;
-       return 0;
+       return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
 }
 EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 
@@ -476,7 +469,6 @@ struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
                struct nfs_pgio_header *hdr = &header->header;
 
                INIT_LIST_HEAD(&hdr->pages);
-               INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
                hdr->rw_ops = ops;
@@ -654,27 +646,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
 
-static int nfs_do_multiple_pgios(struct list_head *head,
-                                const struct rpc_call_ops *call_ops,
-                                int how)
-{
-       struct nfs_pgio_data *data;
-       int ret = 0;
-
-       while (!list_empty(head)) {
-               int ret2;
-
-               data = list_first_entry(head, struct nfs_pgio_data, list);
-               list_del_init(&data->list);
-
-               ret2 = nfs_initiate_pgio(NFS_CLIENT(data->header->inode),
-                                        data, call_ops, how, 0);
-               if (ret == 0)
-                        ret = ret2;
-       }
-       return ret;
-}
-
 /**
  * nfs_pgio_error - Clean up from a pageio error
  * @desc: IO descriptor
@@ -683,14 +654,9 @@ static int nfs_do_multiple_pgios(struct list_head *head,
 static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
                          struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_data *data;
-
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
-       while (!list_empty(&hdr->rpc_list)) {
-               data = list_first_entry(&hdr->rpc_list, struct nfs_pgio_data, list);
-               list_del(&data->list);
-               nfs_pgio_data_release(data);
-       }
+       nfs_pgio_data_release(hdr->data);
+       hdr->data = NULL;
        desc->pg_completion_ops->error_cleanup(&desc->pg_list);
        return -ENOMEM;
 }
@@ -763,50 +729,6 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata)
                data->header->rw_ops->rw_result(task, data);
 }
 
-/*
- * Generate multiple small requests to read or write a single
- * contiguous dirty on one page.
- */
-static int nfs_pgio_multi(struct nfs_pageio_descriptor *desc,
-                         struct nfs_pgio_header *hdr)
-{
-       struct nfs_page *req = hdr->req;
-       struct page *page = req->wb_page;
-       struct nfs_pgio_data *data;
-       size_t wsize = desc->pg_bsize, nbytes;
-       unsigned int offset;
-       int requests = 0;
-       struct nfs_commit_info cinfo;
-
-       nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
-
-       if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
-           (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
-            desc->pg_count > wsize))
-               desc->pg_ioflags &= ~FLUSH_COND_STABLE;
-
-       offset = 0;
-       nbytes = desc->pg_count;
-       do {
-               size_t len = min(nbytes, wsize);
-
-               data = nfs_pgio_data_alloc(hdr, 1);
-               if (!data)
-                       return nfs_pgio_error(desc, hdr);
-               data->pages.pagevec[0] = page;
-               nfs_pgio_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
-               list_add(&data->list, &hdr->rpc_list);
-               requests++;
-               nbytes -= len;
-               offset += len;
-       } while (nbytes != 0);
-
-       nfs_list_remove_request(req);
-       nfs_list_add_request(req, &hdr->pages);
-       desc->pg_rpc_callops = &nfs_pgio_common_ops;
-       return 0;
-}
-
 /*
  * Create an RPC task for the given read or write request and kick it.
  * The page must have been locked by the caller.
@@ -815,8 +737,8 @@ static int nfs_pgio_multi(struct nfs_pageio_descriptor *desc,
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_pgio_one(struct nfs_pageio_descriptor *desc,
-                       struct nfs_pgio_header *hdr)
+int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
+                    struct nfs_pgio_header *hdr)
 {
        struct nfs_page         *req;
        struct page             **pages;
@@ -844,10 +766,11 @@ static int nfs_pgio_one(struct nfs_pageio_descriptor *desc,
 
        /* Set up the argument struct */
        nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
-       list_add(&data->list, &hdr->rpc_list);
+       hdr->data = data;
        desc->pg_rpc_callops = &nfs_pgio_common_ops;
        return 0;
 }
+EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 
 static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 {
@@ -865,23 +788,14 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pgio(desc, hdr);
        if (ret == 0)
-               ret = nfs_do_multiple_pgios(&hdr->rpc_list,
-                                           desc->pg_rpc_callops,
-                                           desc->pg_ioflags);
+               ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
+                                       hdr->data, desc->pg_rpc_callops,
+                                       desc->pg_ioflags, 0);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
 }
 
-int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
-                    struct nfs_pgio_header *hdr)
-{
-       if (desc->pg_bsize < PAGE_CACHE_SIZE)
-               return nfs_pgio_multi(desc, hdr);
-       return nfs_pgio_one(desc, hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_generic_pgio);
-
 static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
                const struct nfs_open_context *ctx2)
 {
@@ -919,15 +833,13 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                    !nfs_match_lock_context(req->wb_lock_context,
                                            prev->wb_lock_context))
                        return false;
-               if (req->wb_pgbase != 0)
-                       return false;
-               if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
-                       return false;
                if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                        return false;
        }
        size = pgio->pg_ops->pg_test(pgio, prev, req);
-       WARN_ON_ONCE(size && size != req->wb_bytes);
+       WARN_ON_ONCE(size > req->wb_bytes);
+       if (size && size < req->wb_bytes)
+               req->wb_bytes = size;
        return size > 0;
 }
 
@@ -1034,6 +946,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        subreq = nfs_create_request(req->wb_context,
                                        req->wb_page,
                                        subreq, pgbase, bytes_left);
+                       if (IS_ERR(subreq))
+                               goto err_ptr;
                        nfs_lock_request(subreq);
                        subreq->wb_offset  = offset;
                        subreq->wb_index = req->wb_index;
@@ -1042,6 +956,10 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 
        nfs_page_group_unlock(req);
        return 1;
+err_ptr:
+       desc->pg_error = PTR_ERR(subreq);
+       nfs_page_group_unlock(req);
+       return 0;
 }
 
 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
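
The most visible behavioral change above is in nfs_generic_pg_test() and nfs_can_coalesce_requests(): instead of rejecting a request that does not fit entirely into the descriptor, pg_test now reports how many of its bytes still fit, and the coalescing path trims req->wb_bytes down to that size so the remainder can be issued as a further subrequest. What follows is a minimal standalone sketch of that arithmetic only; mock_desc, mock_req, min_size and the main() driver are illustrative stand-ins, not the kernel's types or API.

/* Standalone model of the sizing logic introduced in nfs_generic_pg_test()
 * and nfs_can_coalesce_requests().  The structs are simplified stand-ins for
 * nfs_pageio_descriptor and nfs_page; only the fields the arithmetic uses
 * are kept.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_desc {
	size_t pg_bsize;   /* I/O size negotiated with the server (rsize/wsize) */
	size_t pg_count;   /* bytes already queued in this descriptor */
};

struct mock_req {
	size_t wb_bytes;   /* bytes covered by this request */
};

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Mirrors the rewritten nfs_generic_pg_test(): rather than accepting a
 * request only when it fits whole, return how many of its bytes still fit. */
static size_t pg_test(const struct mock_desc *desc, const struct mock_req *req)
{
	if (desc->pg_count > desc->pg_bsize)
		return 0;   /* the "should never happen" case in the kernel code */
	return min_size(desc->pg_bsize - desc->pg_count, req->wb_bytes);
}

/* Mirrors the trimming step added to nfs_can_coalesce_requests(): a partial
 * fit shrinks the request so the rest can be queued as a new subrequest. */
static int can_coalesce(const struct mock_desc *desc, struct mock_req *req)
{
	size_t size = pg_test(desc, req);

	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

int main(void)
{
	struct mock_desc desc = { .pg_bsize = 4096, .pg_count = 3072 };
	struct mock_req req = { .wb_bytes = 2048 };

	if (can_coalesce(&desc, &req))
		printf("coalesced %zu bytes (request trimmed from 2048)\n",
		       req.wb_bytes);   /* prints 1024: only the space left in pg_bsize */
	return 0;
}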