On 03/19, David Howells wrote:
>
> --- a/security/keys/gc.c
> +++ b/security/keys/gc.c
> @@ -218,8 +218,10 @@ static void key_garbage_collector(struct work_struct *work)
>  		key = rb_entry(cursor, struct key, serial_node);
>  		cursor = rb_next(cursor);
>
> -		if (refcount_read(&key->usage) == 0)
> +		if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) {
> +			smp_mb(); /* Clobber key->user after FINAL_PUT seen. */
>  			goto found_unreferenced_key;
> +		}
>
>  		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
>  			if (key->type == key_gc_dead_keytype) {
> diff --git a/security/keys/key.c b/security/keys/key.c
> index 3d7d185019d3..7198cd2ac3a3 100644
> --- a/security/keys/key.c
> +++ b/security/keys/key.c
> @@ -658,6 +658,8 @@ void key_put(struct key *key)
>  			key->user->qnbytes -= key->quotalen;
>  			spin_unlock_irqrestore(&key->user->lock, flags);
>  		}
> +		smp_mb(); /* key->user before FINAL_PUT set. */
> +		set_bit(KEY_FLAG_FINAL_PUT, &key->flags);
>  		schedule_work(&key_gc_work);

Can't resist: smp_mb__before_atomic() should work equally well here, but
this doesn't really matter, please forget.

I believe this patch is correct,

Reviewed-by: Oleg Nesterov <oleg@xxxxxxxxxx>
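
Just to spell out (for myself) the pairing the two smp_mb()'s are meant to
provide, here is a minimal userspace sketch. C11 fences and a pthread stand
in for smp_mb()/set_bit()/test_bit() and the GC worker; the struct and field
names are made up for illustration, not the real keyring ones.

/*
 * Userspace illustration only, not the kernel code: the "last put" side
 * finishes touching key->user, then publishes a final-put flag; the GC
 * side may clobber key->user only after it has observed that flag.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct user {
	long qnbytes;
};

struct key {
	struct user *user;
	atomic_bool final_put;		/* stands in for KEY_FLAG_FINAL_PUT */
};

/* Last-put path: finish the quota update, then publish the flag. */
static void key_put_last(struct key *key)
{
	key->user->qnbytes -= 1;			/* touches key->user */
	atomic_thread_fence(memory_order_seq_cst);	/* key->user before flag set */
	atomic_store_explicit(&key->final_put, true, memory_order_relaxed);
}

/* GC path: clobber key->user only after the flag has been seen. */
static void *gc_thread(void *arg)
{
	struct key *key = arg;

	while (!atomic_load_explicit(&key->final_put, memory_order_relaxed))
		;					/* wait for the flag */
	atomic_thread_fence(memory_order_seq_cst);	/* clobber after flag seen */
	free(key->user);
	key->user = NULL;
	return NULL;
}

int main(void)
{
	struct key key = { .user = malloc(sizeof(struct user)) };
	pthread_t gc;

	atomic_init(&key.final_put, false);
	pthread_create(&gc, NULL, gc_thread, &key);
	key_put_last(&key);
	pthread_join(gc, NULL);
	printf("key->user freed only after the final-put flag was seen\n");
	return 0;
}

The fence in key_put_last() pairs with the fence in gc_thread(): if the GC
observes the flag, it also observes the preceding qnbytes update, so it
cannot free ->user out from under the still-running put path. As far as I
can tell this is the same pairing the two smp_mb()'s express in the patch.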