{
struct nfs4_layoutreturn *lrp = calldata;
struct pnfs_layout_hdr *lo = lrp->args.layout;
- LIST_HEAD(freeme);
dprintk("--> %s\n", __func__);
- spin_lock(&lo->plh_inode->i_lock);
- if (lrp->res.lrs_present) {
- pnfs_mark_matching_lsegs_invalid(lo, &freeme,
- &lrp->args.range,
- be32_to_cpu(lrp->args.stateid.seqid));
- pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
- } else
- pnfs_mark_layout_stateid_invalid(lo, &freeme);
- pnfs_clear_layoutreturn_waitbit(lo);
- spin_unlock(&lo->plh_inode->i_lock);
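+ /* Free the returned segments and update (or invalidate) the layout stateid */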
+ pnfs_layoutreturn_free_lsegs(lo, &lrp->args.range,
+ be32_to_cpu(lrp->args.stateid.seqid),
+ lrp->res.lrs_present ? &lrp->res.stateid : NULL);
nfs4_sequence_free_slot(&lrp->res.seq_res);
- pnfs_free_lseg_list(&freeme);
pnfs_put_layout_hdr(lrp->args.layout);
nfs_iput_and_deactive(lrp->inode);
kfree(calldata);
static LIST_HEAD(pnfs_modules_tbl);
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
+static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
+ struct list_head *free_me,
+ const struct pnfs_layout_range *range,
+ u32 seq);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
pnfs_clear_layoutreturn_info(lo);
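+ /* Also pick up any segments still queued on plh_return_segs */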
+ pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
}
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
- struct inode *ino = lseg->pls_layout->plh_inode;
-
- NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
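+ /* Callers may now pass NULL when a segment was handed off for layoutreturn */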
+ if (lseg != NULL) {
+ struct inode *inode = lseg->pls_layout->plh_inode;
+
+ NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
+ }
}
static void
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
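+/*
+ * If @lseg is marked NFS_LSEG_LAYOUTRETURN and the layout is still valid,
+ * park it on plh_return_segs instead of freeing it now; it is freed once
+ * the layoutreturn completes (see pnfs_layoutreturn_free_lsegs()).
+ * Returns true if the segment was queued.
+ */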
+static bool
+pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
+ pnfs_layout_is_valid(lo)) {
+ list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
+ return true;
+ }
+ return false;
+}
+
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
}
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
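+ /* If the segment was queued for layoutreturn, don't free it here */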
+ if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
+ lseg = NULL;
spin_unlock(&inode->i_lock);
pnfs_free_lseg(lseg);
pnfs_put_layout_hdr(lo);
struct pnfs_layout_hdr *lo = lseg->pls_layout;
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
return;
- pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
- pnfs_free_lseg_async(lseg);
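+ /* Only free the segment here if it wasn't queued for layoutreturn */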
+ if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
+ pnfs_get_layout_hdr(lo);
+ pnfs_free_lseg_async(lseg);
+ }
}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
return remaining;
}
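+/*
+ * Move the segments on plh_return_segs that match @range and @seq onto
+ * @free_me, so the caller can free them after dropping the inode spinlock.
+ */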
+static void
+pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
+ struct list_head *free_me,
+ const struct pnfs_layout_range *range,
+ u32 seq)
+{
+ struct pnfs_layout_segment *lseg, *next;
+
+ list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
+ if (pnfs_match_lseg_recall(lseg, range, seq))
+ list_move_tail(&lseg->pls_list, free_me);
+ }
+}
+
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
}
}
-void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
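+/*
+ * Process the outcome of a layoutreturn: free the returned segments matching
+ * @range and @seq, then either update the layout stateid (@stateid != NULL,
+ * i.e. the server reported lrs_present) or mark the layout stateid invalid,
+ * and finally wake up anyone waiting on NFS_LAYOUT_RETURN.
+ */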
+void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ const struct pnfs_layout_range *range,
+ u32 seq,
+ const nfs4_stateid *stateid)
+{
+ struct inode *inode = lo->plh_inode;
+ LIST_HEAD(freeme);
+
+ spin_lock(&inode->i_lock);
+ if (stateid) {
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+ pnfs_free_returned_lsegs(lo, &freeme, range, seq);
+ pnfs_set_layout_stateid(lo, stateid, true);
+ } else
+ pnfs_mark_layout_stateid_invalid(lo, &freeme);
+ pnfs_clear_layoutreturn_waitbit(lo);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
+}
+
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
nfs4_stateid *stateid,
atomic_set(&lo->plh_refcount, 1);
INIT_LIST_HEAD(&lo->plh_layouts);
INIT_LIST_HEAD(&lo->plh_segs);
+ INIT_LIST_HEAD(&lo->plh_return_segs);
INIT_LIST_HEAD(&lo->plh_bulk_destroy);
lo->plh_inode = ino;
lo->plh_lc_cred = get_rpccred(ctx->cred);
.offset = 0,
.length = NFS4_MAX_UINT64,
};
- LIST_HEAD(free_me);
bool return_now = false;
spin_lock(&inode->i_lock);
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
* for how it works.
*/
- if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0)) {
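+ /* Keep any invalidated segments on plh_return_segs until the layoutreturn completes */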
+ if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
nfs4_stateid stateid;
enum pnfs_iomode iomode;
spin_unlock(&inode->i_lock);
nfs_commit_inode(inode, 0);
}
- pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);