Allow architectures to define a kasan_arch_is_ready() hook that bails
out of any function that's about to touch the shadow unless the arch
says that it is ready for the memory to be accessed. This is fairly
non-invasive and should have a negligible performance penalty.

This will only work in outline mode, so an arch must select
ARCH_DISABLE_KASAN_INLINE if it requires this.

Cc: Balbir Singh <bsingharora@xxxxxxxxx>
Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
Suggested-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Signed-off-by: Daniel Axtens <dja@xxxxxxxxxx>

--

I discuss the justification for this later in the series. Also, both
previous RFCs for ppc64 - by two different people - have needed this
trick! See:
 - https://lore.kernel.org/patchwork/patch/592820/ # ppc64 hash series
 - https://patchwork.ozlabs.org/patch/795211/ # ppc radix series

---
 mm/kasan/common.c  | 4 ++++
 mm/kasan/generic.c | 3 +++
 mm/kasan/kasan.h   | 4 ++++
 mm/kasan/shadow.c  | 8 ++++++++
 4 files changed, 19 insertions(+)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 10177cc26d06..0ad615f3801d 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,10 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 	u8 tag;
 	void *tagged_object;
 
+	/* Bail if the arch isn't ready */
+	if (!kasan_arch_is_ready())
+		return false;
+
 	tag = get_tag(object);
 	tagged_object = object;
 	object = kasan_reset_tag(object);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 53cbf28859b5..c3f5ba7a294a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -163,6 +163,9 @@ static __always_inline bool check_region_inline(unsigned long addr,
 						size_t size, bool write,
 						unsigned long ret_ip)
 {
+	if (!kasan_arch_is_ready())
+		return true;
+
 	if (unlikely(size == 0))
 		return true;
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8f450bc28045..19323a3d5975 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -449,6 +449,10 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
 
 #endif /* CONFIG_KASAN_GENERIC */
 
+#ifndef kasan_arch_is_ready
+static inline bool kasan_arch_is_ready(void) { return true; }
+#endif
+
 /*
  * Exported functions for interfaces called from assembly or from generated
  * code. Declarations here to avoid warning about missing declarations.
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 082ee5b6d9a1..3c7f7efe6f68 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -73,6 +73,10 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 	void *shadow_start, *shadow_end;
 
+	/* Don't touch the shadow memory if arch isn't ready */
+	if (!kasan_arch_is_ready())
+		return;
+
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
@@ -99,6 +103,10 @@ EXPORT_SYMBOL(kasan_poison);
 #ifdef CONFIG_KASAN_GENERIC
 void kasan_poison_last_granule(const void *addr, size_t size)
 {
+	/* Don't touch the shadow memory if arch isn't ready */
+	if (!kasan_arch_is_ready())
+		return;
+
 	if (size & KASAN_GRANULE_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
 		*shadow = size & KASAN_GRANULE_MASK;
--
2.30.2
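
Purely as an illustration of the arch side (none of this is in the patch,
and the flag name below is made up): an arch that opts in would provide
its own kasan_arch_is_ready() in its asm/kasan.h, define the macro of the
same name so the #ifndef fallback in mm/kasan/kasan.h is compiled out,
and select ARCH_DISABLE_KASAN_INLINE from its Kconfig. Roughly:

/* arch/<arch>/include/asm/kasan.h -- hypothetical sketch */
#ifdef CONFIG_KASAN
/* set by the arch's kasan_init() once the shadow is mapped */
extern bool kasan_shadow_initialized;

static inline bool kasan_arch_is_ready(void)
{
	return kasan_shadow_initialized;
}
/* tell mm/kasan/kasan.h not to provide the default */
#define kasan_arch_is_ready kasan_arch_is_ready
#endif

and in arch/<arch>/Kconfig:

	select ARCH_DISABLE_KASAN_INLINE if KASAN

A real implementation would probably want a static key rather than a
plain bool, so that once the arch is ready the test costs a patched
branch instead of a load on every shadow access.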