NMIs can happen at any time.  This patch makes sure that the safe
printk() in NMI context schedules the IRQ work only when the related
structures have been initialized.  All pending messages are flushed
once the IRQ work is initialized.

Signed-off-by: Petr Mladek <pmladek@xxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>
Cc: Daniel Thompson <daniel.thompson@xxxxxxxxxx>
Cc: Jiri Kosina <jkosina@xxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>
Cc: David Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 kernel/printk/nmi.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index 479e0764203c..303cf0d15e57 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -38,6 +38,7 @@
  * were handled or when IRQs are blocked.
  */
 DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
+static int printk_nmi_irq_ready;
 
 #define NMI_LOG_BUF_LEN (4096 - sizeof(atomic_t) - sizeof(struct irq_work))
 
@@ -84,8 +85,11 @@ again:
                goto again;
 
        /* Get flushed in a more safe context. */
-       if (add)
+       if (add && printk_nmi_irq_ready) {
+               /* Make sure that IRQ work is really initialized. */
+               smp_rmb();
                irq_work_queue(&s->work);
+       }
 
        return add;
 }
@@ -195,6 +199,13 @@ void __init printk_nmi_init(void)
 
                init_irq_work(&s->work, __printk_nmi_flush);
        }
+
+       /* Make sure that IRQ works are initialized before enabling. */
+       smp_wmb();
+       printk_nmi_irq_ready = 1;
+
+       /* Flush pending messages that did not have scheduled IRQ works. */
+       printk_nmi_flush();
 }
 
 void printk_nmi_enter(void)
--
1.8.5.6
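
The ordering in the patch is the classic publish/consume pattern:
initialize everything, smp_wmb(), then set the ready flag; the reader
tests the flag and issues smp_rmb() before touching what was published.
The standalone userspace sketch below is only an illustration of that
pattern, not part of the patch: the names (work_ready, pending_work,
flush_messages) are hypothetical, and C11 release/acquire atomics stand
in for the kernel's smp_wmb()/smp_rmb() pairing, since those are
kernel-internal.

#include <stdatomic.h>
#include <stdio.h>

struct work {
        void (*func)(void);
};

static struct work pending_work;
static atomic_int work_ready;   /* plays the role of printk_nmi_irq_ready */

static void flush_messages(void)
{
        puts("flushing deferred messages");
}

/* Writer side, mirroring printk_nmi_init(): init first, then publish. */
static void publisher(void)
{
        pending_work.func = flush_messages;
        /* release store ~ smp_wmb() followed by "printk_nmi_irq_ready = 1" */
        atomic_store_explicit(&work_ready, 1, memory_order_release);
}

/* Reader side, mirroring vprintk_nmi() deciding whether to queue work. */
static void consumer(void)
{
        /* acquire load ~ testing the flag and then doing smp_rmb() */
        if (atomic_load_explicit(&work_ready, memory_order_acquire))
                pending_work.func();    /* initialization is visible here */
        else
                puts("not ready; message stays buffered for a later flush");
}

int main(void)
{
        consumer();     /* before init: nothing is queued */
        publisher();
        consumer();     /* after init: the published work runs */
        return 0;
}

A reader that observes work_ready == 1 through the acquire load is
guaranteed to also observe the fully initialized structure, which is
exactly the guarantee the explicit barrier pair provides in the patch.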