4.10-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Andrew Price <anprice@xxxxxxxxxx>

commit f38e5fb95a1f8feda88531eedc98f69b24748712 upstream.

We must hold the rcu read lock across looking up glocks and trying to
bump their refcount to prevent the glocks from being freed in between.

Signed-off-by: Andrew Price <anprice@xxxxxxxxxx>
Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
Signed-off-by: Bob Peterson <rpeterso@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 fs/gfs2/glock.c |    5 +++++
 1 file changed, 5 insertions(+)

--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -658,9 +658,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp,
 	struct kmem_cache *cachep;
 	int ret, tries = 0;
 
+	rcu_read_lock();
 	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
 		gl = NULL;
+	rcu_read_unlock();
 
 	*glp = gl;
 	if (gl)
@@ -728,15 +730,18 @@ again:
 
 	if (ret == -EEXIST) {
 		ret = 0;
+		rcu_read_lock();
 		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
 			if (++tries < 100) {
+				rcu_read_unlock();
 				cond_resched();
 				goto again;
 			}
 			tmp = NULL;
 			ret = -ENOMEM;
 		}
+		rcu_read_unlock();
 	} else {
 		WARN_ON_ONCE(ret);
 	}
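
For readers less familiar with the idiom the fix relies on: an rhashtable
lookup done under RCU only guarantees the returned object is not freed while
the RCU read-side critical section is held, so the reference count has to be
taken before rcu_read_unlock().  Below is a minimal sketch of that pattern,
not part of the patch; the helper name example_glock_lookup is hypothetical,
while gl_hash_table, ht_parms, struct lm_lockname and lockref_get_not_dead()
are the existing GFS2/kernel symbols the patch touches.

	/*
	 * Illustrative sketch only (hypothetical helper): look up a glock and
	 * take a reference while RCU still guarantees it has not been freed.
	 */
	static struct gfs2_glock *example_glock_lookup(struct lm_lockname *name)
	{
		struct gfs2_glock *gl;

		rcu_read_lock();
		gl = rhashtable_lookup_fast(&gl_hash_table, name, ht_parms);
		/* Bump the refcount inside the RCU read-side critical section;
		 * a dead lockref means the glock is already being torn down. */
		if (gl && !lockref_get_not_dead(&gl->gl_lockref))
			gl = NULL;
		rcu_read_unlock();

		return gl;
	}

Dropping the RCU read lock between the lookup and lockref_get_not_dead()
would reopen exactly the window the patch closes: the glock could be freed
after the lookup returns but before the refcount is bumped.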