From: Dave Chinner <dchinner@xxxxxxxxxx>

Fix the incorrect use of igrab() inside the i_lock in NFS and Ceph

If we are already holding the i_lock, we have a reference to the
inode so we can safely use ihold() to gain an extra reference. This
avoids hangs due to lock recursion on the i_lock now that the
inode_lock is gone and igrab() uses the i_lock itself.

Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
---
 fs/ceph/addr.c     |    2 +-
 fs/ceph/snap.c     |    4 ++--
 fs/nfs/nfs4state.c |    3 ++-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 561438b..37368ba 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -92,7 +92,7 @@ static int ceph_set_page_dirty(struct page *page)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 		++ci->i_wrbuffer_ref_head;
 		if (ci->i_wrbuffer_ref == 0)
-			igrab(inode);
+			ihold(inode);
 		++ci->i_wrbuffer_ref;
 		dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
 		     "snapc %p seq %lld (%d snaps)\n",
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f40b913..0aee66b 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -463,8 +463,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)

 	dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
 	     capsnap, snapc);
-	igrab(inode);
-
+	ihold(inode);
+
 	atomic_set(&capsnap->nref, 1);
 	capsnap->ci = ci;
 	INIT_LIST_HEAD(&capsnap->ci_item);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ab1bf5b..a6804f7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -590,7 +590,8 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
 	state->owner = owner;
 	atomic_inc(&owner->so_count);
 	list_add(&state->inode_states, &nfsi->open_states);
-	state->inode = igrab(inode);
+	ihold(inode);
+	state->inode = inode;
 	spin_unlock(&inode->i_lock);
 	/* Note: The reclaim code dictates that we add stateless
 	 * and read-only stateids to the end of the list */
--
1.7.2.3
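
To make the hang concrete: with the inode_lock gone, igrab() takes
inode->i_lock itself (it has to check that the inode is not being
freed before bumping the reference count), while ihold() only
increments i_count. Below is a minimal userspace sketch of that
locking rule; this is not kernel code, just simplified stand-ins for
the two helpers in fs/inode.c, showing why only ihold() is safe while
i_lock is already held:

	/* build with: cc -Wall -pthread igrab_vs_ihold.c */
	#include <pthread.h>
	#include <stdio.h>

	struct inode {
		pthread_mutex_t i_lock;	/* non-recursive, like the kernel spinlock */
		int i_count;		/* inode reference count */
	};

	/* igrab()-like: must take i_lock to validate the inode first */
	static struct inode *igrab(struct inode *inode)
	{
		pthread_mutex_lock(&inode->i_lock);	/* hangs if caller holds i_lock */
		inode->i_count++;
		pthread_mutex_unlock(&inode->i_lock);
		return inode;
	}

	/* ihold()-like: caller already has a reference, no lock needed */
	static void ihold(struct inode *inode)
	{
		inode->i_count++;	/* the real ihold() is an atomic_inc() */
	}

	int main(void)
	{
		struct inode inode = { PTHREAD_MUTEX_INITIALIZER, 1 };

		pthread_mutex_lock(&inode.i_lock);
		ihold(&inode);		/* safe: no lock recursion */
		/* igrab(&inode);	   would deadlock: i_lock taken twice */
		pthread_mutex_unlock(&inode.i_lock);

		igrab(&inode);		/* fine here: i_lock not held */
		printf("i_count = %d\n", inode.i_count);
		return 0;
	}

All three call sites in the patch run under i_lock (in
nfs4_get_open_state the spin_unlock() immediately after the old
igrab() call makes this visible in the hunk), which is why the swap
to ihold() is a correctness fix rather than an optimisation.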