JFI

On (25/01/29 15:43), Sergey Senozhatsky wrote:
> +static void zspage_lock_init(struct zspage *zspage)
> +{
> +        atomic_set(&zspage->lock, ZS_PAGE_UNLOCKED);
> +}
> +
> +/*
> + * zspage lock permits preemption on the reader-side (there can be multiple
> + * readers). Writers (exclusive zspage ownership), on the other hand, are
> + * always run in atomic context and cannot spin waiting for a (potentially
> + * preempted) reader to unlock zspage. This, basically, means that writers
> + * can only call write-try-lock and must bail out if it didn't succeed.
> + *
> + * At the same time, writers cannot reschedule under zspage write-lock,
> + * so readers can spin waiting for the writer to unlock zspage.
> + */
> +static void zspage_read_lock(struct zspage *zspage)
> +{
> +        atomic_t *lock = &zspage->lock;
> +        int old;
> +
> +        while (1) {
> +                old = atomic_read(lock);
> +                if (old == ZS_PAGE_WRLOCKED) {
> +                        cpu_relax();
> +                        continue;
> +                }
> +
> +                if (atomic_try_cmpxchg(lock, &old, old + 1))
> +                        return;
> +
> +                cpu_relax();
> +        }
> +}
> +
> +static void zspage_read_unlock(struct zspage *zspage)
> +{
> +        atomic_dec(&zspage->lock);
> +}
> +
> +static int zspage_try_write_lock(struct zspage *zspage)
> +{
> +        atomic_t *lock = &zspage->lock;
> +        int old = ZS_PAGE_UNLOCKED;
> +
> +        preempt_disable();
> +        if (atomic_try_cmpxchg(lock, &old, ZS_PAGE_WRLOCKED))
> +                return 1;
> +
> +        preempt_enable();
> +        return 0;
> +}
> +
> +static void zspage_write_unlock(struct zspage *zspage)
> +{
> +        atomic_set(&zspage->lock, ZS_PAGE_UNLOCKED);
> +        preempt_enable();
> +}

Below is what I currently have, based on (private) feedback from Uros.
No functional changes intended. I think I'll wait before sending out v2,
given that this is neither a fix nor a functional change (the WR-locked
case is relatively uncommon; it only occurs when we map an object from a
page that is currently under either migration or compaction.)

---

static void zspage_lock_init(struct zspage *zspage)
{
        atomic_set(&zspage->lock, ZS_PAGE_UNLOCKED);
}

static void zspage_read_lock(struct zspage *zspage)
{
        atomic_t *lock = &zspage->lock;
        int old = atomic_read(lock);

        do {
                /*
                 * Wait for the writer to release the lock. Don't use
                 * if+continue here: `continue' in a do-while jumps
                 * straight to the condition, so a still-WRLOCKED `old'
                 * would reach the cmpxchg and could flip the lock from
                 * WRLOCKED to WRLOCKED + 1, corrupting its state.
                 */
                while (old == ZS_PAGE_WRLOCKED) {
                        cpu_relax();
                        old = atomic_read(lock);
                }
        } while (!atomic_try_cmpxchg(lock, &old, old + 1));
}

static void zspage_read_unlock(struct zspage *zspage)
{
        atomic_dec(&zspage->lock);
}

static bool zspage_try_write_lock(struct zspage *zspage)
{
        atomic_t *lock = &zspage->lock;
        int old = ZS_PAGE_UNLOCKED;

        preempt_disable();
        if (atomic_try_cmpxchg(lock, &old, ZS_PAGE_WRLOCKED))
                return true;

        preempt_enable();
        return false;
}

static void zspage_write_unlock(struct zspage *zspage)
{
        atomic_set(&zspage->lock, ZS_PAGE_UNLOCKED);
        preempt_enable();
}
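
For context, a minimal sketch of the intended locking contract. The call
sites below are hypothetical and only for illustration; the function
names are made up and are not part of the patch:

/* Reader side: preemptible, may spin waiting for a writer. */
static void map_object_example(struct zspage *zspage)
{
        zspage_read_lock(zspage);
        /*
         * Objects in the zspage can be safely accessed here; the
         * zspage cannot be migrated or compacted from under us.
         */
        zspage_read_unlock(zspage);
}

/* Writer side: atomic context, must not spin waiting for readers. */
static bool migrate_zspage_example(struct zspage *zspage)
{
        /* Hypothetical: bail out if a reader holds the lock. */
        if (!zspage_try_write_lock(zspage))
                return false;

        /* Exclusive ownership; preemption is disabled until unlock. */
        zspage_write_unlock(zspage);
        return true;
}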