The assertions inform developers about locking expectations just as well
as the comments did. Additionally they are validated by lockdep at
runtime, making sure the expectations are actually met.

Signed-off-by: Thomas Weißschuh <linux@xxxxxxxxxxxxxx>
---
Tested against the sysctl selftests and normal desktop usage.
---
 fs/proc/proc_sysctl.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b1c2c0b82116..1d9d789fbd3e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -17,6 +17,7 @@
 #include <linux/bpf-cgroup.h>
 #include <linux/mount.h>
 #include <linux/kmemleak.h>
+#include <linux/lockdep.h>
 #include "internal.h"
 
 #define list_for_each_table_entry(entry, header) \
@@ -104,7 +105,6 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2)
 	return cmp;
 }
 
-/* Called under sysctl_lock */
 static struct ctl_table *find_entry(struct ctl_table_header **phead,
 	struct ctl_dir *dir, const char *name, int namelen)
 {
@@ -112,6 +112,8 @@ static struct ctl_table *find_entry(struct ctl_table_header **phead,
 	struct ctl_table *entry;
 	struct rb_node *node = dir->root.rb_node;
 
+	lockdep_assert_held(&sysctl_lock);
+
 	while (node) {
 		struct ctl_node *ctl_node;
 
@@ -258,18 +260,20 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
 	return err;
 }
 
-/* called under sysctl_lock */
 static int use_table(struct ctl_table_header *p)
 {
+	lockdep_assert_held(&sysctl_lock);
+
 	if (unlikely(p->unregistering))
 		return 0;
 	p->used++;
 	return 1;
 }
 
-/* called under sysctl_lock */
 static void unuse_table(struct ctl_table_header *p)
 {
+	lockdep_assert_held(&sysctl_lock);
+
 	if (!--p->used)
 		if (unlikely(p->unregistering))
 			complete(p->unregistering);
@@ -280,9 +284,11 @@ static void proc_sys_invalidate_dcache(struct ctl_table_header *head)
 	proc_invalidate_siblings_dcache(&head->inodes, &sysctl_lock);
 }
 
-/* called under sysctl_lock, will reacquire if has to wait */
 static void start_unregistering(struct ctl_table_header *p)
 {
+	/* will reacquire if has to wait */
+	lockdep_assert_held(&sysctl_lock);
+
 	/*
 	 * if p->used is 0, nobody will ever touch that entry again;
 	 * we'll eliminate all paths to it before dropping sysctl_lock

---
base-commit: 27b31deb900dfcec60820d8d3e48f6de9ae9a18e
change-id: 20240629-sysctl-lockdep-26c90b73a786

Best regards,
-- 
Thomas Weißschuh <linux@xxxxxxxxxxxxxx>
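
P.S. For readers less familiar with the pattern, here is a minimal sketch
(not part of the patch; example_lock, example_count and the helpers are
made-up names) of how lockdep_assert_held() replaces a "called under X"
comment:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

/* Callers must hold example_lock. */
static void example_bump(void)
{
	/*
	 * Documents the locking rule and, with lockdep enabled
	 * (CONFIG_LOCKDEP), warns at runtime if the caller does not
	 * actually hold the lock.
	 */
	lockdep_assert_held(&example_lock);
	example_count++;
}

static void example_user(void)
{
	spin_lock(&example_lock);
	example_bump();		/* fine: lock held */
	spin_unlock(&example_lock);

	example_bump();		/* lockdep warning: lock not held */
}

When lockdep is disabled the assertion compiles to nothing, so it adds no
overhead to production builds while still serving as documentation.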