The following commit has been merged into the locking/core branch of tip:

Commit-ID:     fbd7a5a0359bc770e898d918d84977ea61163aad
Gitweb:        https://git.kernel.org/tip/fbd7a5a0359bc770e898d918d84977ea61163aad
Author:        Lyude Paul <lyude@xxxxxxxxxx>
AuthorDate:    Mon, 25 Nov 2024 15:40:58 -05:00
Committer:     Boqun Feng <boqun.feng@xxxxxxxxx>
CommitterDate: Thu, 19 Dec 2024 14:04:42 -08:00

rust: sync: Add lock::Backend::assert_is_held()

Since we've exposed Lock::from_raw() and Guard::new() publicly, we want
to assert that a lock is actually held when constructing a Guard for
it, in order to handle unsafe Guard::new() calls outside of our lock
module.

Hence add a new method assert_is_held() to Backend, which uses lockdep
to check whether or not a lock has been acquired. When lockdep is
disabled, this has no overhead.

[Boqun: Resolve the conflicts with exposing Guard::new(), reword the
commit log a bit and format "unsafe { <statement>; }" into
"unsafe { <statement> }" for consistency.]

Signed-off-by: Lyude Paul <lyude@xxxxxxxxxx>
Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
Link: https://lore.kernel.org/r/20241125204139.656801-1-lyude@xxxxxxxxxx
---
 rust/helpers/mutex.c              |  5 +++++
 rust/helpers/spinlock.c           |  5 +++++
 rust/kernel/sync/lock.rs          | 10 ++++++++++
 rust/kernel/sync/lock/mutex.rs    |  5 +++++
 rust/kernel/sync/lock/spinlock.rs |  5 +++++
 5 files changed, 30 insertions(+)

diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
index 7e00680..0657555 100644
--- a/rust/helpers/mutex.c
+++ b/rust/helpers/mutex.c
@@ -12,3 +12,8 @@ void rust_helper___mutex_init(struct mutex *mutex, const char *name,
 {
 	__mutex_init(mutex, name, key);
 }
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+	lockdep_assert_held(mutex);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
index 5971fdf..42c4bf0 100644
--- a/rust/helpers/spinlock.c
+++ b/rust/helpers/spinlock.c
@@ -30,3 +30,8 @@ int rust_helper_spin_trylock(spinlock_t *lock)
 {
 	return spin_trylock(lock);
 }
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+	lockdep_assert_held(lock);
+}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 72dbf3f..eb80048 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -90,6 +90,13 @@ pub unsafe trait Backend {
         // SAFETY: The safety requirements ensure that the lock is initialised.
         *guard_state = unsafe { Self::lock(ptr) };
     }
+
+    /// Asserts that the lock is held using lockdep.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that [`Backend::init`] has been previously called.
+    unsafe fn assert_is_held(ptr: *mut Self::State);
 }
 
 /// A mutual exclusion primitive.
@@ -235,6 +242,9 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
     ///
     /// The caller must ensure that it owns the lock.
     pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+        unsafe { B::assert_is_held(lock.state.get()) };
+
         Self {
             lock,
             state,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 10a70c0..70cadbc 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -134,4 +134,9 @@ unsafe impl super::Backend for MutexBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::mutex_assert_is_held(ptr) }
+    }
 }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 081c022..ab2f8d0 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -133,4 +133,9 @@ unsafe impl super::Backend for SpinLockBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::spin_assert_is_held(ptr) }
+    }
 }
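
[Editor's note] The pattern this assertion protects is a caller outside the
lock module that reconstructs a Guard for a lock acquired on the C side.
Below is a minimal sketch of that pattern for a mutex. Guard::new() is taken
from the hunk above; the exact Lock::from_raw() signature, the function name
with_held_mutex(), and the use of () as MutexBackend's GuardState are
assumptions of this example, not part of the patch:

use kernel::sync::lock::{mutex::MutexBackend, Backend, Guard, Lock};

// Sketch only: called with `ptr` pointing to a `struct mutex` that the
// caller has already locked on the C side.
unsafe fn with_held_mutex(ptr: *mut <MutexBackend as Backend>::State) {
    // SAFETY (assumed): `ptr` points to a valid, initialised mutex that
    // outlives this borrow. The `from_raw()` shape here is illustrative.
    let lock = unsafe { Lock::<(), MutexBackend>::from_raw(ptr) };

    // SAFETY (assumed): the caller holds the mutex. With this patch,
    // `Guard::new()` also calls `MutexBackend::assert_is_held()`, so lockdep
    // (when enabled) reports the case where that promise is broken.
    let guard = unsafe { Guard::new(lock, ()) };

    // ... access the data protected by the mutex through `guard` ...

    // Dropping `guard` releases the mutex via the backend's unlock path.
    drop(guard);
}

When lockdep is disabled, lockdep_assert_held() compiles to nothing, so the
new call in Guard::new() adds no overhead in that configuration, as noted in
the commit log above.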