3.16-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>

commit 412f6c4c26fb1eba8844290663837561ac53fa6e upstream.

If we did an OPEN_DOWNGRADE, then the right thing to do on success, is
to apply the new open mode to the struct nfs4_state. Instead, we were
unconditionally clearing the state, making it appear to our state
machinery as if we had just performed a CLOSE.

Fixes: 226056c5c312b (NFSv4: Use correct locking when updating nfs4_state...)
Signed-off-by: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 fs/nfs/nfs4proc.c |    9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2545,6 +2545,7 @@ static void nfs4_close_done(struct rpc_t
 	struct nfs4_closedata *calldata = data;
 	struct nfs4_state *state = calldata->state;
 	struct nfs_server *server = NFS_SERVER(calldata->inode);
+	nfs4_stateid *res_stateid = NULL;
 
 	dprintk("%s: begin!\n", __func__);
 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -2555,12 +2556,12 @@ static void nfs4_close_done(struct rpc_t
 	 */
 	switch (task->tk_status) {
 		case 0:
-			if (calldata->roc)
+			res_stateid = &calldata->res.stateid;
+			if (calldata->arg.fmode == 0 && calldata->roc)
 				pnfs_roc_set_barrier(state->inode,
 						     calldata->roc_barrier);
-			nfs_clear_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
-			goto out_release;
+			break;
 		case -NFS4ERR_ADMIN_REVOKED:
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_OLD_STATEID:
@@ -2574,7 +2575,7 @@ static void nfs4_close_done(struct rpc_t
 				goto out_release;
 			}
 	}
-	nfs_clear_open_stateid(state, NULL, calldata->arg.fmode);
+	nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
 out_release:
 	nfs_release_seqid(calldata->arg.seqid);
 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
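
[Reviewer aid, not part of the patch: the stand-alone sketch below only models the
changed success path of nfs4_close_done(). The names close_done_model() and
clear_open_stateid_model(), and the simplified types, are hypothetical stand-ins
rather than the kernel's API; the point is that on a status-0 reply the result
stateid and the *requested* fmode now reach the state update, instead of a NULL
stateid with fmode 0.]

/*
 * Minimal user-space model of the post-patch success path.
 * Build: gcc -Wall -o close_model close_model.c
 */
#include <stdio.h>

typedef struct { unsigned int seqid; } nfs4_stateid_model;

/* fmode stand-in: 0 means a full CLOSE, non-zero means an OPEN_DOWNGRADE */
typedef unsigned int fmode_model_t;

/* Stand-in for nfs_clear_open_stateid(): just reports what it was handed. */
static void clear_open_stateid_model(const nfs4_stateid_model *res_stateid,
				     fmode_model_t fmode)
{
	if (res_stateid)
		printf("update state: reply stateid seq %u, remaining mode 0x%x\n",
		       res_stateid->seqid, fmode);
	else
		printf("no reply stateid: remaining mode 0x%x\n", fmode);
}

/* Mirrors the patched success path: on status 0 the reply stateid is kept and
 * the requested fmode is applied, instead of unconditionally passing fmode 0
 * (which made an OPEN_DOWNGRADE look like a CLOSE). Error cases are elided. */
static void close_done_model(int tk_status, fmode_model_t arg_fmode,
			     const nfs4_stateid_model *reply)
{
	const nfs4_stateid_model *res_stateid = NULL;

	switch (tk_status) {
	case 0:
		res_stateid = reply;	/* keep the server's reply stateid */
		break;
	default:
		break;
	}
	clear_open_stateid_model(res_stateid, arg_fmode);
}

int main(void)
{
	nfs4_stateid_model reply = { .seqid = 7 };

	close_done_model(0, 0, &reply);		/* full CLOSE */
	close_done_model(0, 0x1, &reply);	/* OPEN_DOWNGRADE, FMODE_READ-like */
	return 0;
}

In the second call the open mode is downgraded rather than wiped, which is the
behaviour the fix restores; before the patch both calls would have reached the
state update with fmode 0.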