They are implicitly zero-initialized; remove the explicit initialization.
This keeps the upcoming additions to kcsan_ctx consistent with the rest.

No functional change intended.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Acked-by: Mark Rutland <mark.rutland@xxxxxxx>
---
v3:
* Minimize diff by leaving "scoped_accesses" on its own line, which
  should also reduce diff of future changes.
---
 init/init_task.c    | 5 -----
 kernel/kcsan/core.c | 5 -----
 2 files changed, 10 deletions(-)

diff --git a/init/init_task.c b/init/init_task.c
index 2d024066e27b..73cc8f03511a 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -182,11 +182,6 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_KCSAN
 	.kcsan_ctx = {
-		.disable_count		= 0,
-		.atomic_next		= 0,
-		.atomic_nest_count	= 0,
-		.in_flat_atomic		= false,
-		.access_mask		= 0,
 		.scoped_accesses	= {LIST_POISON1, NULL},
 	},
 #endif
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 6bfd3040f46b..e34a1710b7bc 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -44,11 +44,6 @@ bool kcsan_enabled;
 
 /* Per-CPU kcsan_ctx for interrupts */
 static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
-	.disable_count		= 0,
-	.atomic_next		= 0,
-	.atomic_nest_count	= 0,
-	.in_flat_atomic		= false,
-	.access_mask		= 0,
 	.scoped_accesses	= {LIST_POISON1, NULL},
 };
 
-- 
2.34.0.rc2.393.gf8c9666880-goog
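
For reference, the deletions are safe because C guarantees that members
omitted from a designated initializer are implicitly zero-initialized
(C11 §6.7.9), and objects with static storage duration start out
all-zero regardless. Below is a minimal userspace sketch of that
semantics; it uses a hypothetical stand-in struct, not the real
struct kcsan_ctx:

	#include <assert.h>
	#include <stdbool.h>

	/* Hypothetical stand-in; not the real struct kcsan_ctx. */
	struct ctx {
		int disable_count;
		int atomic_next;
		int atomic_nest_count;
		bool in_flat_atomic;
		unsigned long access_mask;
		void *scoped_accesses;
	};

	/*
	 * Only scoped_accesses is named; every other member is
	 * implicitly initialized to zero/false/NULL, exactly as if it
	 * had been spelled out as in the pre-patch initializers.
	 */
	static struct ctx demo = {
		.scoped_accesses = &demo,
	};

	int main(void)
	{
		assert(demo.disable_count == 0);
		assert(demo.atomic_next == 0);
		assert(demo.atomic_nest_count == 0);
		assert(!demo.in_flat_atomic);
		assert(demo.access_mask == 0);
		return 0;
	}

The same rule applies to the DEFINE_PER_CPU() initializer in
kernel/kcsan/core.c and the init_task initializer: naming only
scoped_accesses leaves the remaining members zeroed.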