NFSv4: Ensure continued open and lockowner name uniqueness
author     Trond Myklebust <Trond.Myklebust@netapp.com>
           Tue, 21 Dec 2010 15:45:27 +0000 (10:45 -0500)
committer  Trond Myklebust <Trond.Myklebust@netapp.com>
           Thu, 6 Jan 2011 21:03:13 +0000 (16:03 -0500)
In order to enable migration support, we will want to move some of the
structures that are subject to migration into the struct nfs_server.
In particular, if we are to move the state_owner and state_owner_id to
being per-filesystem structures, then we should label the resulting
open/lock owners with a per-filesystem label to ensure global uniqueness.

This patch does so by adding the super block s_dev to the open/lock owner
name.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
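
For illustration only, here is a minimal userspace sketch of the 20-byte
owner name that encode_lockowner() emits after this change: the fixed
"lock id:" prefix, the new per-filesystem s_dev word, and the 64-bit owner
id, all in XDR (big-endian) byte order. The function name and the constant
OWNER_NAME_LEN are assumptions for the sketch, not kernel identifiers.

    /* Userspace sketch, not kernel code: lays out the owner name exactly
     * as the XDR encoder in the diff below does. */
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>          /* htonl(): XDR is big-endian */

    #define OWNER_NAME_LEN 20       /* 8 (prefix) + 4 (s_dev) + 8 (id) */

    static size_t build_lock_owner_name(unsigned char buf[OWNER_NAME_LEN],
                                        uint32_t s_dev, uint64_t id)
    {
            uint32_t word;

            memcpy(buf, "lock id:", 8);             /* fixed 8-byte prefix */

            word = htonl(s_dev);                    /* per-filesystem label */
            memcpy(buf + 8, &word, 4);

            word = htonl((uint32_t)(id >> 32));     /* owner id, high word */
            memcpy(buf + 12, &word, 4);
            word = htonl((uint32_t)id);             /* owner id, low word */
            memcpy(buf + 16, &word, 4);

            return OWNER_NAME_LEN;
    }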
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
include/linux/nfs_xdr.h

fs/nfs/nfs4proc.c
index 88f590feeb724888abe373538b5a3ff6fc864673..f2b92f6a7efb8dfe54455c8997f74f3265697f28 100644
@@ -3779,6 +3779,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
                goto out;
        lsp = request->fl_u.nfs4_fl.owner;
        arg.lock_owner.id = lsp->ls_id.id;
+       arg.lock_owner.s_dev = server->s_dev;
        status = nfs4_call_sync(server, &msg, &arg, &res, 1);
        switch (status) {
                case 0:
@@ -4024,6 +4025,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->arg.lock_stateid = &lsp->ls_stateid;
        p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
        p->arg.lock_owner.id = lsp->ls_id.id;
+       p->arg.lock_owner.s_dev = server->s_dev;
        p->res.lock_seqid = p->arg.lock_seqid;
        p->lsp = lsp;
        p->server = server;
@@ -4428,6 +4430,7 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
                return;
        args->lock_owner.clientid = server->nfs_client->cl_clientid;
        args->lock_owner.id = lsp->ls_id.id;
+       args->lock_owner.s_dev = server->s_dev;
        msg.rpc_argp = args;
        rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
 }
fs/nfs/nfs4xdr.c
index 3cbdd0c80a2d1600b061f7cbf783615a07b79c7d..8e496887ec61f097f34da2efffd3f946e0117542 100644
@@ -71,8 +71,8 @@ static int nfs4_stat_to_errno(int);
 /* lock,open owner id:
  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
  */
-#define open_owner_id_maxsz    (1 + 4)
-#define lock_owner_id_maxsz    (1 + 4)
+#define open_owner_id_maxsz    (1 + 1 + 4)
+#define lock_owner_id_maxsz    (1 + 1 + 4)
 #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define compound_encode_hdr_maxsz      (3 + (NFS4_MAXTAGLEN >> 2))
 #define compound_decode_hdr_maxsz      (3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@ static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lo
 {
        __be32 *p;
 
-       p = reserve_space(xdr, 28);
+       p = reserve_space(xdr, 32);
        p = xdr_encode_hyper(p, lowner->clientid);
-       *p++ = cpu_to_be32(16);
+       *p++ = cpu_to_be32(20);
        p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+       *p++ = cpu_to_be32(lowner->s_dev);
        xdr_encode_hyper(p, lowner->id);
 }
 
@@ -1210,10 +1211,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
        *p++ = cpu_to_be32(OP_OPEN);
        *p = cpu_to_be32(arg->seqid->sequence->counter);
        encode_share_access(xdr, arg->fmode);
-       p = reserve_space(xdr, 28);
+       p = reserve_space(xdr, 32);
        p = xdr_encode_hyper(p, arg->clientid);
-       *p++ = cpu_to_be32(16);
+       *p++ = cpu_to_be32(20);
        p = xdr_encode_opaque_fixed(p, "open id:", 8);
+       *p++ = cpu_to_be32(arg->server->s_dev);
        xdr_encode_hyper(p, arg->id);
 }
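
The reserve_space() bump from 28 to 32 bytes and the opaque length change
from 16 to 20 in the two hunks above follow directly from the added 32-bit
s_dev word. A compile-time sketch of that accounting (the enum names are
illustrative, not kernel identifiers):

    /* Byte accounting behind the 28 -> 32 reserve_space() change. */
    enum {
            XDR_CLIENTID_BYTES   = 8,           /* xdr_encode_hyper(clientid) */
            XDR_OPAQUE_LEN_BYTES = 4,           /* owner name length word, now 20 */
            XDR_OWNER_NAME_BYTES = 8 + 4 + 8,   /* "lock id:" + s_dev + id = 20 */
            XDR_OWNER_TOTAL      = XDR_CLIENTID_BYTES +
                                   XDR_OPAQUE_LEN_BYTES +
                                   XDR_OWNER_NAME_BYTES /* = 32, was 28 */
    };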
 
include/linux/nfs_xdr.h
index 83d36d3a12e6fe929996beb4239ccd66792962be..b0068579bec252b2a357a97ddb841a1c1b9086d4 100644
@@ -317,6 +317,7 @@ struct nfs_closeres {
 struct nfs_lowner {
        __u64                   clientid;
        __u64                   id;
+       dev_t                   s_dev;
 };
 
 struct nfs_lock_args {