Merge tag 'nfs-for-3.16-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
fs/nfs/pagelist.c
index aabff78dc6e5720db3c82cd9ef8095e058a383c8..b6ee3a6ee96dd2b06df61a022fadc0841da8d0b4 100644 (file)
@@ -27,6 +27,9 @@
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
 
 static struct kmem_cache *nfs_page_cachep;
+static const struct rpc_call_ops nfs_pgio_common_ops;
+
+static void nfs_free_request(struct nfs_page *);
 
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
@@ -97,7 +100,7 @@ nfs_iocounter_dec(struct nfs_io_counter *c)
 {
        if (atomic_dec_and_test(&c->io_count)) {
                clear_bit(NFS_IO_INPROGRESS, &c->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
        }
 }
@@ -135,11 +138,156 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
        return __nfs_iocounter_wait(c);
 }
 
+static int nfs_wait_bit_uninterruptible(void *word)
+{
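+       /* wait_on_bit() action: sleep uninterruptibly until woken by wake_up_bit() */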
+       io_schedule();
+       return 0;
+}
+
+/*
+ * nfs_page_group_lock - lock the head of the page group
+ * @req - request in group that is to be locked
+ *
+ * this lock must be held if modifying the page group list
+ */
+void
+nfs_page_group_lock(struct nfs_page *req)
+{
+       struct nfs_page *head = req->wb_head;
+
+       WARN_ON_ONCE(head != head->wb_head);
+
+       wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+                       nfs_wait_bit_uninterruptible,
+                       TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * nfs_page_group_unlock - unlock the head of the page group
+ * @req - request in group that is to be unlocked
+ */
+void
+nfs_page_group_unlock(struct nfs_page *req)
+{
+       struct nfs_page *head = req->wb_head;
+
+       WARN_ON_ONCE(head != head->wb_head);
+
+       smp_mb__before_atomic();
+       clear_bit(PG_HEADLOCK, &head->wb_flags);
+       smp_mb__after_atomic();
+       wake_up_bit(&head->wb_flags, PG_HEADLOCK);
+}
+
+/*
+ * nfs_page_group_sync_on_bit_locked
+ *
+ * must be called with page group lock held
+ */
+static bool
+nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+{
+       struct nfs_page *head = req->wb_head;
+       struct nfs_page *tmp;
+
+       WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
+       WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
+
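+       /* wb_this_page links the group into a circular list; check whether
+        * every other member already has the bit set */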
+       tmp = req->wb_this_page;
+       while (tmp != req) {
+               if (!test_bit(bit, &tmp->wb_flags))
+                       return false;
+               tmp = tmp->wb_this_page;
+       }
+
+       /* the bit was set on every request in the group: clear them all and return true */
+       tmp = req;
+       do {
+               clear_bit(bit, &tmp->wb_flags);
+               tmp = tmp->wb_this_page;
+       } while (tmp != req);
+
+       return true;
+}
+
+/*
+ * nfs_page_group_sync_on_bit - set bit on current request, but only
+ *   return true if the bit is set for all requests in page group
+ * @req - request in page group
+ * @bit - PG_* bit that is used to sync page group
+ */
+bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
+{
+       bool ret;
+
+       nfs_page_group_lock(req);
+       ret = nfs_page_group_sync_on_bit_locked(req, bit);
+       nfs_page_group_unlock(req);
+
+       return ret;
+}
+
+/*
+ * nfs_page_group_init - Initialize the page group linkage for @req
+ * @req - a new nfs request
+ * @prev - the previous request in page group, or NULL if @req is the first
+ *         or only request in the group (the head).
+ */
+static inline void
+nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
+{
+       WARN_ON_ONCE(prev == req);
+
+       if (!prev) {
+               req->wb_head = req;
+               req->wb_this_page = req;
+       } else {
+               WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
+               WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
+               req->wb_head = prev->wb_head;
+               req->wb_this_page = prev->wb_this_page;
+               prev->wb_this_page = req;
+
+               /* grab extra ref if head request has extra ref from
+                * the write/commit path to handle handoff between write
+                * and commit lists */
+               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+                       kref_get(&req->wb_kref);
+       }
+}
+
+/*
+ * nfs_page_group_destroy - sync the destruction of page groups
+ * @req - request that no longer needs the page group
+ *
+ * releases the page group reference from each member once all
+ * members have called this function.
+ */
+static void
+nfs_page_group_destroy(struct kref *kref)
+{
+       struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
+       struct nfs_page *tmp, *next;
+
+       if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
+               return;
+
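+       /* all members have reached PG_TEARDOWN: release the whole group */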
+       tmp = req;
+       do {
+               next = tmp->wb_this_page;
+               /* unlink and free */
+               tmp->wb_this_page = tmp;
+               tmp->wb_head = tmp;
+               nfs_free_request(tmp);
+               tmp = next;
+       } while (tmp != req);
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
- * @inode: inode to which the request is attached
  * @page: page to write
+ * @last: last nfs request created for this page group or NULL if head
  * @offset: starting offset within the page for the write
  * @count: number of bytes to read/write
  *
@@ -148,9 +296,9 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
  * User should ensure it is safe to sleep in this function.
  */
 struct nfs_page *
-nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
-                  struct page *page,
-                  unsigned int offset, unsigned int count)
+nfs_create_request(struct nfs_open_context *ctx, struct page *page,
+                  struct nfs_page *last, unsigned int offset,
+                  unsigned int count)
 {
        struct nfs_page         *req;
        struct nfs_lock_context *l_ctx;
@@ -182,6 +330,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
        req->wb_bytes   = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
+       nfs_page_group_init(req, last);
        return req;
 }
 
@@ -195,9 +344,9 @@ void nfs_unlock_request(struct nfs_page *req)
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(PG_BUSY, &req->wb_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&req->wb_flags, PG_BUSY);
 }
 
@@ -239,16 +388,22 @@ static void nfs_clear_request(struct nfs_page *req)
        }
 }
 
-
 /**
  * nfs_release_request - Release the count on an NFS read/write request
  * @req: request to release
  *
  * Note: Should never be called with the spinlock held!
  */
-static void nfs_free_request(struct kref *kref)
+static void nfs_free_request(struct nfs_page *req)
 {
-       struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
+       WARN_ON_ONCE(req->wb_this_page != req);
+
+       /* extra debug: make sure no sync bits are still set */
+       WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
+       WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
 
        /* Release struct file and open context */
        nfs_clear_request(req);
@@ -257,13 +412,7 @@ static void nfs_free_request(struct kref *kref)
 
 void nfs_release_request(struct nfs_page *req)
 {
-       kref_put(&req->wb_kref, nfs_free_request);
-}
-
-static int nfs_wait_bit_uninterruptible(void *word)
-{
-       io_schedule();
-       return 0;
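+       /* the final put frees the request via the page group teardown path */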
+       kref_put(&req->wb_kref, nfs_page_group_destroy);
 }
 
 /**
@@ -281,19 +430,25 @@ nfs_wait_on_request(struct nfs_page *req)
                        TASK_UNINTERRUPTIBLE);
 }
 
-bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+/*
+ * nfs_generic_pg_test - determine if requests can be coalesced
+ * @desc: pointer to descriptor
+ * @prev: previous request in desc, or NULL
+ * @req: this request
+ *
+ * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
+ * the size of the request.
+ */
+size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+                          struct nfs_page *prev, struct nfs_page *req)
 {
-       /*
-        * FIXME: ideally we should be able to coalesce all requests
-        * that are not block boundary aligned, but currently this
-        * is problematic for the case of bsize < PAGE_CACHE_SIZE,
-        * since nfs_flush_multi and nfs_pagein_multi assume you
-        * can have only one struct nfs_page.
-        */
-       if (desc->pg_bsize < PAGE_SIZE)
+       if (desc->pg_count > desc->pg_bsize) {
+               /* should never happen */
+               WARN_ON_ONCE(1);
                return 0;
+       }
 
-       return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
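+       /* coalesce as many bytes as still fit in the descriptor, but never
+        * more than the request holds */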
+       return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
 }
 EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 
@@ -314,7 +469,6 @@ struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
                struct nfs_pgio_header *hdr = &header->header;
 
                INIT_LIST_HEAD(&hdr->pages);
-               INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
                hdr->rw_ops = ops;
@@ -338,8 +492,8 @@ EXPORT_SYMBOL_GPL(nfs_rw_header_free);
  * @hdr: The header making a request
  * @pagecount: Number of pages to create
  */
-struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
-                                         unsigned int pagecount)
+static struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
+                                                unsigned int pagecount)
 {
        struct nfs_pgio_data *data, *prealloc;
 
@@ -388,6 +542,50 @@ void nfs_pgio_data_release(struct nfs_pgio_data *data)
 }
 EXPORT_SYMBOL_GPL(nfs_pgio_data_release);
 
+/**
+ * nfs_pgio_rpcsetup - Set up arguments for a pageio call
+ * @data: The pageio data
+ * @count: Number of bytes to read or write
+ * @offset: Initial offset
+ * @how: How to commit data (writes only)
+ * @cinfo: Commit information for the call (writes only)
+ */
+static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
+                             unsigned int count, unsigned int offset,
+                             int how, struct nfs_commit_info *cinfo)
+{
+       struct nfs_page *req = data->header->req;
+
+       /* Set up the RPC argument and reply structs
+        * NB: take care not to mess about with data->commit et al. */
+
+       data->args.fh     = NFS_FH(data->header->inode);
+       data->args.offset = req_offset(req) + offset;
+       /* pnfs_set_layoutcommit needs this */
+       data->mds_offset = data->args.offset;
+       data->args.pgbase = req->wb_pgbase + offset;
+       data->args.pages  = data->pages.pagevec;
+       data->args.count  = count;
+       data->args.context = get_nfs_open_context(req->wb_context);
+       data->args.lock_context = req->wb_lock_context;
+       data->args.stable  = NFS_UNSTABLE;
+       switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
+       case 0:
+               break;
+       case FLUSH_COND_STABLE:
+               if (nfs_reqs_to_commit(cinfo))
+                       break;
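+               /* fall through: nothing waiting to commit, make this write stable */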
+       default:
+               data->args.stable = NFS_FILE_SYNC;
+       }
+
+       data->res.fattr   = &data->fattr;
+       data->res.count   = count;
+       data->res.eof     = 0;
+       data->res.verf    = &data->verf;
+       nfs_fattr_init(&data->fattr);
+}
+
 /**
  * nfs_pgio_prepare - Prepare pageio data to go over the wire
  * @task: The current task
@@ -402,6 +600,67 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
                rpc_exit(task, err);
 }
 
+int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_data *data,
+                     const struct rpc_call_ops *call_ops, int how, int flags)
+{
+       struct rpc_task *task;
+       struct rpc_message msg = {
+               .rpc_argp = &data->args,
+               .rpc_resp = &data->res,
+               .rpc_cred = data->header->cred,
+       };
+       struct rpc_task_setup task_setup_data = {
+               .rpc_client = clnt,
+               .task = &data->task,
+               .rpc_message = &msg,
+               .callback_ops = call_ops,
+               .callback_data = data,
+               .workqueue = nfsiod_workqueue,
+               .flags = RPC_TASK_ASYNC | flags,
+       };
+       int ret = 0;
+
+       data->header->rw_ops->rw_initiate(data, &msg, &task_setup_data, how);
+
+       dprintk("NFS: %5u initiated pgio call "
+               "(req %s/%llu, %u bytes @ offset %llu)\n",
+               data->task.tk_pid,
+               data->header->inode->i_sb->s_id,
+               (unsigned long long)NFS_FILEID(data->header->inode),
+               data->args.count,
+               (unsigned long long)data->args.offset);
+
+       task = rpc_run_task(&task_setup_data);
+       if (IS_ERR(task)) {
+               ret = PTR_ERR(task);
+               goto out;
+       }
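+       /* the task runs asynchronously; FLUSH_SYNC callers wait here and
+        * collect the RPC status */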
+       if (how & FLUSH_SYNC) {
+               ret = rpc_wait_for_completion_task(task);
+               if (ret == 0)
+                       ret = task->tk_status;
+       }
+       rpc_put_task(task);
+out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
+
+/**
+ * nfs_pgio_error - Clean up from a pageio error
+ * @desc: IO descriptor
+ * @hdr: pageio header
+ */
+static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
+                         struct nfs_pgio_header *hdr)
+{
+       set_bit(NFS_IOHDR_REDO, &hdr->flags);
+       nfs_pgio_data_release(hdr->data);
+       hdr->data = NULL;
+       desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+       return -ENOMEM;
+}
+
 /**
  * nfs_pgio_release - Release pageio data
  * @calldata: The pageio data to release
@@ -470,6 +729,73 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata)
                data->header->rw_ops->rw_result(task, data);
 }
 
+/*
+ * Create an RPC task for the given read or write request and kick it.
+ * The page must have been locked by the caller.
+ *
+ * It may happen that the page we're passed is not marked dirty.
+ * This is the case if nfs_updatepage detects a conflicting request
+ * that has been written but not committed.
+ */
+int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
+                    struct nfs_pgio_header *hdr)
+{
+       struct nfs_page         *req;
+       struct page             **pages;
+       struct nfs_pgio_data    *data;
+       struct list_head *head = &desc->pg_list;
+       struct nfs_commit_info cinfo;
+
+       data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
+                                                          desc->pg_count));
+       if (!data)
+               return nfs_pgio_error(desc, hdr);
+
+       nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
+       pages = data->pages.pagevec;
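+       /* move each request from the descriptor onto the header, collecting
+        * its page into the pagevec for the RPC */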
+       while (!list_empty(head)) {
+               req = nfs_list_entry(head->next);
+               nfs_list_remove_request(req);
+               nfs_list_add_request(req, &hdr->pages);
+               *pages++ = req->wb_page;
+       }
+
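+       /* with more IO on the way or commits pending, an unstable write plus
+        * COMMIT is cheaper than forcing FILE_SYNC here */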
+       if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+           (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
+               desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
+       /* Set up the argument struct */
+       nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+       hdr->data = data;
+       desc->pg_rpc_callops = &nfs_pgio_common_ops;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_generic_pgio);
+
+static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
+{
+       struct nfs_rw_header *rw_hdr;
+       struct nfs_pgio_header *hdr;
+       int ret;
+
+       rw_hdr = nfs_rw_header_alloc(desc->pg_rw_ops);
+       if (!rw_hdr) {
+               desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+               return -ENOMEM;
+       }
+       hdr = &rw_hdr->header;
+       nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
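+       /* take a reference so the header survives if the RPC completes
+        * before the final put below */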
+       atomic_inc(&hdr->refcnt);
+       ret = nfs_generic_pgio(desc, hdr);
+       if (ret == 0)
+               ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
+                                       hdr->data, desc->pg_rpc_callops,
+                                       desc->pg_ioflags, 0);
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
+       return ret;
+}
+
 static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
                const struct nfs_open_context *ctx2)
 {
@@ -498,18 +824,23 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
 {
-       if (!nfs_match_open_context(req->wb_context, prev->wb_context))
-               return false;
-       if (req->wb_context->dentry->d_inode->i_flock != NULL &&
-           !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
-               return false;
-       if (req->wb_pgbase != 0)
-               return false;
-       if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
-               return false;
-       if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
-               return false;
-       return pgio->pg_ops->pg_test(pgio, prev, req);
+       size_t size;
+
+       if (prev) {
+               if (!nfs_match_open_context(req->wb_context, prev->wb_context))
+                       return false;
+               if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+                   !nfs_match_lock_context(req->wb_lock_context,
+                                           prev->wb_lock_context))
+                       return false;
+               if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+                       return false;
+       }
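+       /* pg_test may trim the request; a zero return means it cannot be
+        * coalesced into this descriptor at all */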
+       size = pgio->pg_ops->pg_test(pgio, prev, req);
+       WARN_ON_ONCE(size > req->wb_bytes);
+       if (size && size < req->wb_bytes)
+               req->wb_bytes = size;
+       return size > 0;
 }
 
 /**
@@ -523,17 +854,16 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
 {
+       struct nfs_page *prev = NULL;
        if (desc->pg_count != 0) {
-               struct nfs_page *prev;
-
                prev = nfs_list_entry(desc->pg_list.prev);
-               if (!nfs_can_coalesce_requests(prev, req, desc))
-                       return 0;
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                desc->pg_base = req->wb_pgbase;
        }
+       if (!nfs_can_coalesce_requests(prev, req, desc))
+               return 0;
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count += req->wb_bytes;
@@ -563,22 +893,73 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
  * @desc: destination io descriptor
  * @req: request
  *
+ * This may split a request into subrequests which are all part of the
+ * same page group.
+ *
  * Returns true if the request 'req' was successfully coalesced into the
  * existing list of pages 'desc'.
  */
 static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
 {
-       while (!nfs_pageio_do_add_request(desc, req)) {
-               desc->pg_moreio = 1;
-               nfs_pageio_doio(desc);
-               if (desc->pg_error < 0)
-                       return 0;
-               desc->pg_moreio = 0;
-               if (desc->pg_recoalesce)
-                       return 0;
-       }
+       struct nfs_page *subreq;
+       unsigned int bytes_left = 0;
+       unsigned int offset, pgbase;
+
+       nfs_page_group_lock(req);
+
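+       /* snapshot the request geometry; the loop below consumes it in
+        * pg_test-sized chunks, splitting off subrequests as needed */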
+       subreq = req;
+       bytes_left = subreq->wb_bytes;
+       offset = subreq->wb_offset;
+       pgbase = subreq->wb_pgbase;
+
+       do {
+               if (!nfs_pageio_do_add_request(desc, subreq)) {
+                       /* make sure pg_test call(s) did nothing */
+                       WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
+                       WARN_ON_ONCE(subreq->wb_offset != offset);
+                       WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
+
+                       nfs_page_group_unlock(req);
+                       desc->pg_moreio = 1;
+                       nfs_pageio_doio(desc);
+                       if (desc->pg_error < 0)
+                               return 0;
+                       desc->pg_moreio = 0;
+                       if (desc->pg_recoalesce)
+                               return 0;
+                       /* retry add_request for this subreq */
+                       nfs_page_group_lock(req);
+                       continue;
+               }
+
+               /* check for buggy pg_test call(s) */
+               WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
+               WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
+               WARN_ON_ONCE(subreq->wb_bytes == 0);
+
+               bytes_left -= subreq->wb_bytes;
+               offset += subreq->wb_bytes;
+               pgbase += subreq->wb_bytes;
+
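+               /* the descriptor accepted only part of this request: queue the
+                * remainder as a new subrequest in the same page group */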
+               if (bytes_left) {
+                       subreq = nfs_create_request(req->wb_context,
+                                       req->wb_page,
+                                       subreq, pgbase, bytes_left);
+                       if (IS_ERR(subreq))
+                               goto err_ptr;
+                       nfs_lock_request(subreq);
+                       subreq->wb_offset  = offset;
+                       subreq->wb_index = req->wb_index;
+               }
+       } while (bytes_left > 0);
+
+       nfs_page_group_unlock(req);
        return 1;
+err_ptr:
+       desc->pg_error = PTR_ERR(subreq);
+       nfs_page_group_unlock(req);
+       return 0;
 }
 
 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
@@ -677,8 +1058,13 @@ void nfs_destroy_nfspagecache(void)
        kmem_cache_destroy(nfs_page_cachep);
 }
 
-const struct rpc_call_ops nfs_pgio_common_ops = {
+static const struct rpc_call_ops nfs_pgio_common_ops = {
        .rpc_call_prepare = nfs_pgio_prepare,
        .rpc_call_done = nfs_pgio_result,
        .rpc_release = nfs_pgio_release,
 };
+
+const struct nfs_pageio_ops nfs_pgio_rw_ops = {
+       .pg_test = nfs_generic_pg_test,
+       .pg_doio = nfs_generic_pg_pgios,
+};