From: Andy Adamson <andros@xxxxxxxxxx>

Separate nfs4_lock calls from nfs41: sequence setup/done support

Call nfs4_sequence_done from the respective rpc_call_done methods. Note
that we need to pass a pointer to the nfs_server in the calldata so it
can be passed on to nfs4_sequence_done.

Signed-off-by: Andy Adamson <andros@xxxxxxxxxx>
Signed-off-by: Benny Halevy <bhalevy@xxxxxxxxxxx>
[pnfs: client data server write validate and release]
[use nfs4_sequence_done_free_slot]
Signed-off-by: Andy Adamson <andros@xxxxxxxxx>
Signed-off-by: Benny Halevy <bhalevy@xxxxxxxxxxx>
---
 fs/nfs/nfs4proc.c |    8 ++++++++
 1 files changed, 8 insertions(+), 0 deletions(-)

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 56612d1..9c41d42 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3712,6 +3712,7 @@ struct nfs4_lockdata {
 	unsigned long timestamp;
 	int rpc_status;
 	int cancelled;
+	struct nfs_server *server;
 };
 
 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
@@ -3739,6 +3740,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
 	p->lsp = lsp;
+	p->server = server;
 	atomic_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
 	memcpy(&p->fl, fl, sizeof(p->fl));
@@ -3768,6 +3770,9 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
 	} else
 		data->arg.new_lock_owner = 0;
 	data->timestamp = jiffies;
+	if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+				&data->res.seq_res, 1, task))
+		return;
 	rpc_call_start(task);
 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
 }
@@ -3778,6 +3783,9 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 
 	dprintk("%s: begin!\n", __func__);
 
+	nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
+				     task->tk_status);
+
 	data->rpc_status = task->tk_status;
 	if (RPC_ASSASSINATED(task))
 		goto out;
-- 
1.6.1.3
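
For anyone wiring another NFSv4.1 operation into the session code, the
pattern this patch applies to the lock path generalizes. Below is a
minimal sketch, assuming a hypothetical operation "foo" whose calldata,
like nfs4_lockdata, carries the nfs_server pointer saved at allocation
time; only the nfs4_setup_sequence()/nfs4_sequence_done_free_slot()
calls follow signatures actually shown in the diff above, and the "1"
cache_this argument simply mirrors the lock path.

/* Hypothetical "foo" operation illustrating the nfs41 sequence
 * setup/done pattern.  All types and names other than
 * nfs4_setup_sequence() and nfs4_sequence_done_free_slot() are
 * made up for illustration.
 */
struct nfs4_foodata {
	struct nfs_fooargs arg;		/* embeds seq_args, as the lock args do */
	struct nfs_foores res;		/* embeds seq_res, as the lock results do */
	struct nfs_server *server;	/* saved at allocation, as in this patch */
	int rpc_status;
};

static void nfs4_foo_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_foodata *data = calldata;

	/* A non-zero return means the task could not get a session
	 * slot and was queued; skip rpc_call_start() in that case.
	 */
	if (nfs4_setup_sequence(data->server->nfs_client,
				&data->arg.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}

static void nfs4_foo_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_foodata *data = calldata;

	/* Process the SEQUENCE result and release the slot before
	 * consuming the operation's own status.
	 */
	nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
				     task->tk_status);
	data->rpc_status = task->tk_status;
}

The early return in the prepare method is the important part: when
nfs4_setup_sequence() does not return zero, the RPC must not be
started, which is exactly why the hunk in nfs4_lock_prepare() inserts
the check ahead of rpc_call_start(). The done method then frees the
slot before the operation's own status is examined, matching the
nfs4_lock_done() hunk.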