On (25/02/06 12:05), Sergey Senozhatsky wrote:
> > Sure. That's what it currently looks like (can always improve)
>
> - added __must_check
> - added preemptible() check		// just in case
> - added locking rules list

Oh, and also switched to acquire/release semantics, like you suggested
a couple of days ago.

---

/*
 * zspage locking rules:
 *
 * 1) writer-lock is exclusive
 *
 * 2) writer-lock owner cannot sleep
 *
 * 3) writer-lock owner cannot spin waiting for the lock
 *  - caller (e.g. compaction and migration) must check the return value
 *    and handle locking failures
 *  - there is only a TRY variant of the writer-lock function
 *
 * 4) reader-lock owners (multiple) can sleep
 *
 * 5) reader-lock owners can spin waiting for the lock, in any context
 *  - existing readers (even preempted ones) don't block new readers
 *  - writer-lock owners never sleep and always unlock at some point,
 *    so the spin is bounded
 */
static void zspage_read_lock(struct zspage *zspage)
{
	atomic_t *lock = &zspage->lock;
	int old = atomic_read_acquire(lock);

	do {
		/*
		 * Spin until the writer drops the lock; only then try to
		 * take a reader reference (old + 1).
		 */
		while (old == ZS_PAGE_WRLOCKED) {
			cpu_relax();
			old = atomic_read_acquire(lock);
		}
	} while (!atomic_try_cmpxchg_acquire(lock, &old, old + 1));

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	rwsem_acquire_read(&zspage->lockdep_map, 0, 0, _RET_IP_);
#endif
}

static void zspage_read_unlock(struct zspage *zspage)
{
	/* Drop the reader reference with release semantics */
	atomic_dec_return_release(&zspage->lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	rwsem_release(&zspage->lockdep_map, _RET_IP_);
#endif
}

static __must_check bool zspage_try_write_lock(struct zspage *zspage)
{
	atomic_t *lock = &zspage->lock;
	int old = ZS_PAGE_UNLOCKED;

	/* Writer-lock callers are expected to already be non-preemptible */
	WARN_ON_ONCE(preemptible());
	preempt_disable();

	/* Succeed only when there are no readers and no writer */
	if (atomic_try_cmpxchg_acquire(lock, &old, ZS_PAGE_WRLOCKED)) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
		rwsem_acquire(&zspage->lockdep_map, 0, 0, _RET_IP_);
#endif
		return true;
	}

	preempt_enable();
	return false;
}

static void zspage_write_unlock(struct zspage *zspage)
{
	atomic_set_release(&zspage->lock, ZS_PAGE_UNLOCKED);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	rwsem_release(&zspage->lockdep_map, _RET_IP_);
#endif
	preempt_enable();
}
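
To make rule 3 concrete, here is a minimal caller sketch. The function
name (zs_try_migrate_zspage_sketch), the -EBUSY fallback and the
comments are illustrative assumptions, not part of the patch; only the
lock/unlock helpers come from the code above.

static int zs_try_migrate_zspage_sketch(struct zspage *zspage)
{
	/*
	 * Hypothetical caller: assumed to already run in atomic context
	 * (e.g. under some other spinlock), matching the preemptible()
	 * check in zspage_try_write_lock().
	 */
	if (!zspage_try_write_lock(zspage))
		return -EBUSY;	/* back off; retry the migration later */

	/* ... relocate objects while all readers are excluded ... */

	zspage_write_unlock(zspage);
	return 0;
}

The reader side, by contrast, just brackets the access and may sleep
while holding the lock (rule 4):

	zspage_read_lock(zspage);
	/* ... map and access the object ... */
	zspage_read_unlock(zspage);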