The allocation wait timeout was initially added because of warnings due
to CONFIG_DETECT_HUNG_TASK=y [1]. While the 1 sec timeout is sufficient
to resolve the warnings (given the hung task timeout must be 1 sec or
larger), it may cause unnecessary wake-ups if the system is idle.

[1] https://lkml.kernel.org/r/CADYN=9J0DQhizAGB0-jz4HOBBh+05kMBXb4c0cXMS7Qi5NAJiw@xxxxxxxxxxxxxx

Fix it by computing the timeout duration in terms of the current
sysctl_hung_task_timeout_secs value.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
 mm/kfence/core.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5f0a56041549..73e7b621fb36 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -20,6 +20,7 @@
 #include <linux/moduleparam.h>
 #include <linux/random.h>
 #include <linux/rcupdate.h>
+#include <linux/sched/sysctl.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -626,7 +627,16 @@ static void toggle_allocation_gate(struct work_struct *work)
        WRITE_ONCE(kfence_timer_waiting, true);
        smp_mb(); /* See comment in __kfence_alloc(). */
 
-       wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
+       if (sysctl_hung_task_timeout_secs) {
+               /*
+                * During low activity with no allocations we might wait a
+                * while; let's avoid the hung task warning.
+                */
+               wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+                                  sysctl_hung_task_timeout_secs * HZ / 2);
+       } else {
+               wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+       }
        smp_store_release(&kfence_timer_waiting, false); /* Order after wait_event(). */
 
        /* Disable static key and reset timer. */
-- 
2.31.1.368.gbe11c130af-goog
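
For reference, below is a minimal user-space sketch of the timeout
arithmetic introduced above, assuming HZ=250 and a hung_task_timeout_secs
of 120; both values are assumptions standing in for the kernel's CONFIG_HZ
and /proc/sys/kernel/hung_task_timeout_secs and vary by configuration. It
only illustrates that the wait is capped at half the hung-task watchdog
period, so the watchdog cannot fire on the timer worker, and that the wait
becomes unbounded when the watchdog is disabled (timeout of 0), as in the
wait_event() branch of the patch.

/*
 * Illustration only, not kernel code: mirrors the timeout expression
 * used by the patch. HZ and hung_task_timeout_secs below are assumed
 * example values, not authoritative defaults.
 */
#include <stdio.h>

#define HZ 250                                  /* assumed tick rate */

static unsigned long hung_task_timeout_secs = 120;  /* assumed sysctl value */

/* Wait duration in jiffies; 0 stands for "wait without a timeout". */
static unsigned long kfence_wait_jiffies(void)
{
        if (hung_task_timeout_secs)
                return hung_task_timeout_secs * HZ / 2; /* half the watchdog period */
        return 0;
}

int main(void)
{
        unsigned long j = kfence_wait_jiffies();

        printf("wait = %lu jiffies (%lu s) vs. old fixed HZ = %d jiffies (1 s)\n",
               j, j / HZ, HZ);
        return 0;
}

With the assumed values this prints a 60 s wait (15000 jiffies) in place
of the previous fixed 1 s (HZ jiffies), which is where the reduction in
idle wake-ups comes from.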