They are implicitly zero-initialized; remove the explicit initialization.
This keeps the upcoming additions to kcsan_ctx consistent with the rest.

No functional change intended.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
 init/init_task.c    | 9 +--------
 kernel/kcsan/core.c | 5 -----
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/init/init_task.c b/init/init_task.c
index 2d024066e27b..61700365ce58 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -181,14 +181,7 @@ struct task_struct init_task
 	.kasan_depth	= 1,
 #endif
 #ifdef CONFIG_KCSAN
-	.kcsan_ctx = {
-		.disable_count		= 0,
-		.atomic_next		= 0,
-		.atomic_nest_count	= 0,
-		.in_flat_atomic		= false,
-		.access_mask		= 0,
-		.scoped_accesses	= {LIST_POISON1, NULL},
-	},
+	.kcsan_ctx = { .scoped_accesses = {LIST_POISON1, NULL} },
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	.softirqs_enabled	= 1,
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 6bfd3040f46b..e34a1710b7bc 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -44,11 +44,6 @@ bool kcsan_enabled;
 
 /* Per-CPU kcsan_ctx for interrupts */
 static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
-	.disable_count		= 0,
-	.atomic_next		= 0,
-	.atomic_nest_count	= 0,
-	.in_flat_atomic		= false,
-	.access_mask		= 0,
 	.scoped_accesses	= {LIST_POISON1, NULL},
 };
 
-- 
2.34.0.rc2.393.gf8c9666880-goog
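
For reference (not part of the patch): a minimal, standalone C sketch of why the removed lines are redundant. With a designated initializer, members that are not explicitly named are implicitly zero-initialized (C11 6.7.9), so spelling out the zero values adds nothing; in the kernel case, both objects also have static storage duration and would be zeroed even without an initializer. The struct and field names below are hypothetical stand-ins for illustration, not the real kcsan_ctx layout.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical stand-in for a context struct; not the real kcsan_ctx. */
struct example_ctx {
	int disable_count;
	int atomic_next;
	bool in_flat_atomic;
	unsigned long access_mask;
	void *list_head;
};

int main(void)
{
	/* Only one member is named; all other members are implicitly zeroed. */
	struct example_ctx ctx = { .list_head = (void *)0x1 };

	assert(ctx.disable_count == 0);
	assert(ctx.atomic_next == 0);
	assert(!ctx.in_flat_atomic);
	assert(ctx.access_mask == 0);
	return 0;
}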