The patch titled
     Subject: kfence: add option to use KFENCE without static keys
has been added to the -mm tree.  Its filename is
     mm-add-kernel-electric-fence-infrastructure-fix-4.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-add-kernel-electric-fence-infrastructure-fix-4.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-add-kernel-electric-fence-infrastructure-fix-4.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when
    testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Marco Elver <elver@xxxxxxxxxx>
Subject: kfence: add option to use KFENCE without static keys

For certain use cases, specifically where the sample interval is always
set to a very low value such as 1ms, it can make sense to use a dynamic
branch instead of static branches, due to the overhead of toggling a
static branch's state.  Therefore, add a new Kconfig option to remove the
static branches and instead check kfence_allocation_gate to determine
whether a KFENCE allocation should be set up.

Link: https://lkml.kernel.org/r/20210111091544.3287013-1-elver@xxxxxxxxxx
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Suggested-by: Jörn Engel <joern@xxxxxxxxxxxxxxx>
Reviewed-by: Jörn Engel <joern@xxxxxxxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kfence.h |   11 ++++++++++-
 lib/Kconfig.kfence     |   12 +++++++++++-
 mm/kfence/core.c       |   30 +++++++++++++++++-------------
 3 files changed, 38 insertions(+), 15 deletions(-)

--- a/include/linux/kfence.h~mm-add-kernel-electric-fence-infrastructure-fix-4
+++ a/include/linux/kfence.h
@@ -4,7 +4,6 @@
 #define _LINUX_KFENCE_H
 
 #include <linux/mm.h>
-#include <linux/static_key.h>
 #include <linux/types.h>
 
 #ifdef CONFIG_KFENCE
@@ -17,7 +16,13 @@
 #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
 extern char *__kfence_pool;
 
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+#include <linux/static_key.h>
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+#else
+#include <linux/atomic.h>
+extern atomic_t kfence_allocation_gate;
+#endif
 
 /**
  * is_kfence_address() - check if an address belongs to KFENCE pool
@@ -104,7 +109,11 @@ void *__kfence_alloc(struct kmem_cache *
  */
 static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+#ifdef CONFIG_KFENCE_STATIC_KEYS
 	if (static_branch_unlikely(&kfence_allocation_key))
+#else
+	if (unlikely(!atomic_read(&kfence_allocation_gate)))
+#endif
 		return __kfence_alloc(s, size, flags);
 	return NULL;
 }
--- a/lib/Kconfig.kfence~mm-add-kernel-electric-fence-infrastructure-fix-4
+++ a/lib/Kconfig.kfence
@@ -6,7 +6,6 @@ config HAVE_ARCH_KFENCE
 menuconfig KFENCE
 	bool "KFENCE: low-overhead sampling-based memory safety error detector"
 	depends on HAVE_ARCH_KFENCE && !KASAN && (SLAB || SLUB)
-	depends on JUMP_LABEL # To ensure performance, require jump labels
 	select STACKTRACE
 	help
 	  KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
@@ -23,6 +22,17 @@ menuconfig KFENCE
 
 if KFENCE
 
+config KFENCE_STATIC_KEYS
+	bool "Use static keys to set up allocations"
+	default y
+	depends on JUMP_LABEL # To ensure performance, require jump labels
+	help
+	  Use static keys (static branches) to set up KFENCE allocations. Using
+	  static keys is normally recommended, because it avoids a dynamic
+	  branch in the allocator's fast path. However, with very low sample
+	  intervals, or on systems that do not support jump labels, a dynamic
+	  branch may still be an acceptable performance trade-off.
+
 config KFENCE_SAMPLE_INTERVAL
 	int "Default sample interval in milliseconds"
 	default 100
--- a/mm/kfence/core.c~mm-add-kernel-electric-fence-infrastructure-fix-4
+++ a/mm/kfence/core.c
@@ -88,11 +88,13 @@ struct kfence_metadata kfence_metadata[C
 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
 
+#ifdef CONFIG_KFENCE_STATIC_KEYS
 /* The static key to set up a KFENCE allocation. */
 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
+#endif
 
 /* Gates the allocation, ensuring only one succeeds in a given period. */
-static atomic_t allocation_gate = ATOMIC_INIT(1);
+atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
 
 /* Statistics counters for debugfs. */
 enum kfence_counter_id {
@@ -579,29 +581,31 @@ late_initcall(kfence_debugfs_init);
 static struct delayed_work kfence_timer;
 static void toggle_allocation_gate(struct work_struct *work)
 {
-	unsigned long end_wait;
-
 	if (!READ_ONCE(kfence_enabled))
 		return;
 
 	/* Enable static key, and await allocation to happen. */
-	atomic_set(&allocation_gate, 0);
+	atomic_set(&kfence_allocation_gate, 0);
+#ifdef CONFIG_KFENCE_STATIC_KEYS
 	static_branch_enable(&kfence_allocation_key);
 	/*
 	 * Await an allocation. Timeout after 1 second, in case the kernel stops
 	 * doing allocations, to avoid stalling this worker task for too long.
 	 */
-	end_wait = jiffies + HZ;
-	do {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&allocation_gate) != 0)
-			break;
-		schedule_timeout(1);
-	} while (time_before(jiffies, end_wait));
-	__set_current_state(TASK_RUNNING);
-
+	{
+		unsigned long end_wait = jiffies + HZ;
+
+		do {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (atomic_read(&kfence_allocation_gate) != 0)
+				break;
+			schedule_timeout(1);
+		} while (time_before(jiffies, end_wait));
+		__set_current_state(TASK_RUNNING);
+	}
 	/* Disable static key and reset timer. */
 	static_branch_disable(&kfence_allocation_key);
+#endif
 	schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval));
 }
 static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
@@ -707,7 +711,7 @@ void *__kfence_alloc(struct kmem_cache *
 	 * sense to continue writing to it and pay the associated contention
 	 * cost, in case we have a large number of concurrent allocations.
 	 */
-	if (atomic_read(&allocation_gate) || atomic_inc_return(&allocation_gate) > 1)
+	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
 		return NULL;
 
 	if (!READ_ONCE(kfence_enabled))
_

Patches currently in -mm which might be from elver@xxxxxxxxxx are

mm-add-kernel-electric-fence-infrastructure-fix.patch
mm-add-kernel-electric-fence-infrastructure-fix-2.patch
mm-add-kernel-electric-fence-infrastructure-fix-3.patch
mm-add-kernel-electric-fence-infrastructure-fix-4.patch
arm64-kfence-enable-kfence-for-arm64.patch
kfence-use-pt_regs-to-generate-stack-trace-on-faults.patch
kfence-documentation-add-kfence-documentation.patch
kfence-add-test-suite.patch
kfence-add-test-suite-fix.patch
kfence-add-test-suite-fix-2.patch
maintainers-add-entry-for-kfence.patch
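
As background on the gating logic the dynamic-branch path relies on, a
minimal sketch follows, assuming a hypothetical userspace analogue built
on C11 <stdatomic.h> rather than the kernel's atomic_t API.  The name
kfence_allocation_gate and the gate protocol mirror the patch;
try_kfence_alloc(), main(), and atomic_fetch_add() standing in for the
kernel's atomic_inc_return() are illustrative assumptions only, not part
of the patch:

	/* Hypothetical userspace sketch (not kernel code) of the
	 * KFENCE allocation gate; gate == 0 means open. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int kfence_allocation_gate = 1;	/* starts closed */

	/* Mirrors the fast-path check in __kfence_alloc(): the plain read
	 * short-circuits the read-modify-write (and its cacheline
	 * contention) once the gate has closed; only the caller that
	 * increments the gate from 0 to 1 performs the allocation for
	 * this sample period. */
	static int try_kfence_alloc(void)
	{
		/* atomic_fetch_add() returns the old value, so "+ 1"
		 * yields the new value, matching atomic_inc_return(). */
		if (atomic_load(&kfence_allocation_gate) ||
		    atomic_fetch_add(&kfence_allocation_gate, 1) + 1 > 1)
			return 0;	/* gate closed, or another caller won */
		return 1;		/* this caller sets up the allocation */
	}

	int main(void)
	{
		/* toggle_allocation_gate() reopens the gate each period. */
		atomic_store(&kfence_allocation_gate, 0);

		printf("%d\n", try_kfence_alloc());	/* 1: first caller wins */
		printf("%d\n", try_kfence_alloc());	/* 0: gate closed again */
		return 0;
	}

Per the comment kept in __kfence_alloc(), the point of reading the gate
before incrementing it is to avoid paying the contention cost of the
atomic increment on every allocation once the gate has already closed, so
between sample periods the fast path is a single read plus a
well-predicted branch.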