lock_list::distance is never greater than MAX_LOCK_DEPTH (which is 48
right now), so a u16 will fit. This patch reduces the size of
lock_list::distance to save space, so that we can introduce other fields
to help detect recursive read lock deadlocks without increasing the size
of the lock_list structure.

Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
 include/linux/lockdep.h  | 2 +-
 kernel/locking/lockdep.c | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6b7cb390f19f..b85973515f84 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -212,7 +212,7 @@ struct lock_list {
 	struct lock_class		*class;
 	struct lock_class		*links_to;
 	const struct lock_trace		*trace;
-	int				distance;
+	u16				distance;
 
 	/*
 	 * The parent field is used to implement breadth-first search, and the
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2d9798b71f74..699e9039a9b3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1320,7 +1320,7 @@ static struct lock_list *alloc_list_entry(void)
  */
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
-			    unsigned long ip, int distance,
+			    unsigned long ip, u16 distance,
 			    const struct lock_trace *trace)
 {
 	struct lock_list *entry;
@@ -2489,7 +2489,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance,
+	       struct held_lock *next, u16 distance,
 	       struct lock_trace **const trace)
 {
 	struct lock_list *entry;
@@ -2622,7 +2622,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		goto out_bug;
 
 	for (;;) {
-		int distance = curr->lockdep_depth - depth + 1;
+		u16 distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
 
 		/*
-- 
2.28.0
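
As an aside, the sizing invariant the commit message relies on (distance
never exceeds MAX_LOCK_DEPTH, which fits in a u16) could be documented
with a compile-time assertion. A minimal sketch, not part of this patch,
assuming MAX_LOCK_DEPTH (from <linux/sched.h>) and U16_MAX (from
<linux/limits.h>) are visible at the assertion site:

	/*
	 * Hypothetical compile-time check (not in this patch): fail the
	 * build if MAX_LOCK_DEPTH ever grows beyond what the u16
	 * distance field can represent, rather than silently truncating
	 * BFS distances at runtime.
	 */
	#include <linux/build_bug.h>

	static_assert(MAX_LOCK_DEPTH <= U16_MAX,
		      "lock_list::distance (u16) must hold MAX_LOCK_DEPTH");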