On 2025-02-22 07:25:42 [+0900], Sergey Senozhatsky wrote:
> index 1424ee73cbb5..03710d71d022 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -226,6 +226,9 @@ struct zs_pool {
> 	/* protect zspage migration/compaction */
> 	rwlock_t lock;
> 	atomic_t compaction_in_progress;
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	struct lock_class_key lock_class;
> +#endif

No ifdef

> };
> 
> static inline void zpdesc_set_first(struct zpdesc *zpdesc)
> @@ -279,6 +294,93 @@ struct mapping_area {
> 	enum zs_mapmode vm_mm;	/* mapping mode */
> };
> 
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +#define zsl_dep_map(zsl)	(&(zsl)->dep_map)
> +#define zspool_lock_class(pool)	(&(pool)->lock_class)
> +#else
> +#define zsl_dep_map(zsl)	NULL
> +#define zspool_lock_class(pool)	NULL
> +#endif
> +
> +static void zspage_lock_init(struct zspage *zspage)
> +{
> +	struct zspage_lock *zsl = &zspage->zsl;
> +
> +	lockdep_init_map(zsl_dep_map(zsl), "zspage->lock",
> +			 zspool_lock_class(zspage->pool), 0);
> +	spin_lock_init(&zsl->lock);
> +	zsl->cnt = ZS_PAGE_UNLOCKED;
> +}
> +
> +/*
> + * The zspage lock can be held from atomic contexts, but it needs to remain
> + * preemptible when held for reading because it remains held outside of those
> + * atomic contexts, otherwise we unnecessarily lose preemptibility.
> + *
> + * To achieve this, the following rules are enforced on readers and writers:
> + *
> + * - Writers are blocked by both writers and readers, while readers are only
> + *   blocked by writers (i.e. normal rwlock semantics).
> + *
> + * - Writers are always atomic (to allow readers to spin waiting for them).
> + *
> + * - Writers always use trylock (as the lock may be held by sleeping readers).
> + *
> + * - Readers may spin on the lock (as they can only wait for atomic writers).
> + *
> + * - Readers may sleep while holding the lock (as writes only use trylock).
> + */
> +static void zspage_read_lock(struct zspage *zspage)
> +{
> +	struct zspage_lock *zsl = &zspage->zsl;
> +
> +	rwsem_acquire_read(zsl_dep_map(zsl), 0, 0, _RET_IP_);
> +
> +	spin_lock(&zsl->lock);
> +	zsl->cnt++;
> +	spin_unlock(&zsl->lock);

How is this working, given that a read_lock always increments the counter? If
it is write_locked, then a read_lock makes it UNLOCKED. migrate_read_lock()
did block if a writer was pending; this does not, or at least it is not
obvious how.

> +	lock_acquired(zsl_dep_map(zsl), _RET_IP_);
> +}
> +

Sebastian
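
For illustration, a minimal sketch (not taken from the quoted patch) of how
the reader side could block on a pending writer. It assumes a
ZS_PAGE_WRLOCKED sentinel (e.g. -1) stored in zsl->cnt by the write side, and
the zspage_write_trylock() name is likewise hypothetical; everything else
(struct zspage_lock, zsl->cnt, zsl->lock, zsl_dep_map(), ZS_PAGE_UNLOCKED)
comes from the quoted diff.

/*
 * Sketch only: ZS_PAGE_WRLOCKED is an assumed sentinel value that does not
 * appear in the quoted diff.
 */
static void zspage_read_lock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	rwsem_acquire_read(zsl_dep_map(zsl), 0, 0, _RET_IP_);

	spin_lock(&zsl->lock);
	/* A writer parks cnt at ZS_PAGE_WRLOCKED; spin until it is gone. */
	while (zsl->cnt == ZS_PAGE_WRLOCKED) {
		spin_unlock(&zsl->lock);
		cpu_relax();
		spin_lock(&zsl->lock);
	}
	zsl->cnt++;
	spin_unlock(&zsl->lock);

	lock_acquired(zsl_dep_map(zsl), _RET_IP_);
}

/* Hypothetical writer counterpart: trylock fails while any reader holds. */
static bool zspage_write_trylock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	spin_lock(&zsl->lock);
	if (zsl->cnt == ZS_PAGE_UNLOCKED) {
		zsl->cnt = ZS_PAGE_WRLOCKED;
		spin_unlock(&zsl->lock);
		rwsem_acquire(zsl_dep_map(zsl), 0, 1, _RET_IP_);
		return true;
	}
	spin_unlock(&zsl->lock);
	return false;
}

With a scheme along these lines, a reader spins only while the (atomic)
writer is in its critical section, the write trylock simply fails while
cnt > 0, and zsl->lock serializes every update to cnt so the counter can
never be bumped out of the write-locked state by a concurrent reader.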