The following commit has been merged into the sched/rt branch of tip:

Commit-ID:     5c586baa60e46d9fccd04f04e16f8a50b2fbc1af
Gitweb:        https://git.kernel.org/tip/5c586baa60e46d9fccd04f04e16f8a50b2fbc1af
Author:        John Ogness <john.ogness@xxxxxxxxxxxxx>
AuthorDate:    Wed, 04 Sep 2024 14:11:27 +02:06
Committer:     Petr Mladek <pmladek@xxxxxxxx>
CommitterDate: Wed, 04 Sep 2024 15:56:32 +02:00

printk: nbcon: Use thread callback if in task context for legacy

When printing via console_lock, the write_atomic() callback is used for
nbcon consoles. However, if it is known that the current context is a
task context, the write_thread() callback can be used instead.

Using write_thread() instead of write_atomic() helps to reduce large
disabled preemption regions when the device_lock does not disable
preemption.

This is mainly a preparatory change to allow avoiding write_atomic()
completely during normal operation if boot consoles are registered.

As a side-effect, it also allows consolidating the printing code for
legacy printing and the kthread printer.

Signed-off-by: John Ogness <john.ogness@xxxxxxxxxxxxx>
Reviewed-by: Petr Mladek <pmladek@xxxxxxxx>
Link: https://lore.kernel.org/r/20240904120536.115780-9-john.ogness@xxxxxxxxxxxxx
Signed-off-by: Petr Mladek <pmladek@xxxxxxxx>
---
 kernel/printk/internal.h |  4 +-
 kernel/printk/nbcon.c    | 95 ++++++++++++++++++++++-----------
 kernel/printk/printk.c   |  5 +-
 3 files changed, 59 insertions(+), 45 deletions(-)

diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 14f7fc7..a96d411 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -90,7 +90,7 @@ void nbcon_free(struct console *con);
 enum nbcon_prio nbcon_get_default_prio(void);
 void nbcon_atomic_flush_pending(void);
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie);
+				   int cookie, bool use_atomic);
 bool nbcon_kthread_create(struct console *con);
 void nbcon_kthread_stop(struct console *con);
 void nbcon_kthreads_wake(void);
@@ -174,7 +174,7 @@ static inline void nbcon_free(struct console *con) { }
 static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
 static inline void nbcon_atomic_flush_pending(void) { }
 static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-						 int cookie) { return false; }
+						 int cookie, bool use_atomic) { return false; }
 static inline void nbcon_kthread_wake(struct console *con) { }
 
 static inline bool console_is_usable(struct console *con, short flags,
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 57a0e9b..784e5de 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1043,9 +1043,10 @@ update_con:
 }
 
 /*
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- *				write_atomic() callback
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ *			specified callback
  * @wctxt:	An initialized write context struct to use for this context
+ * @use_atomic:	True if the write_atomic() callback is to be used
  *
  * Return:	True, when a record has been printed and there are still
  *		pending records. The caller might want to continue flushing.
@@ -1058,12 +1059,25 @@ update_con:
  * This is an internal helper to handle the locking of the console before
  * calling nbcon_emit_next_record().
  */
-static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
 {
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+	struct console *con = ctxt->console;
+	unsigned long flags;
+	bool ret = false;
+
+	if (!use_atomic) {
+		con->device_lock(con, &flags);
+
+		/*
+		 * Ensure this stays on the CPU to make handover and
+		 * takeover possible.
+		 */
+		cant_migrate();
+	}
 
 	if (!nbcon_context_try_acquire(ctxt))
-		return false;
+		goto out;
 
 	/*
 	 * nbcon_emit_next_record() returns false when the console was
@@ -1073,12 +1087,16 @@ static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
 	 * The higher priority printing context takes over responsibility
 	 * to print the pending records.
 	 */
-	if (!nbcon_emit_next_record(wctxt, true))
-		return false;
+	if (!nbcon_emit_next_record(wctxt, use_atomic))
+		goto out;
 
 	nbcon_context_release(ctxt);
 
-	return ctxt->backlog;
+	ret = ctxt->backlog;
+out:
+	if (!use_atomic)
+		con->device_unlock(con, flags);
+	return ret;
 }
 
 /**
@@ -1163,30 +1181,8 @@ wait_for_event:
 
 		con_flags = console_srcu_read_flags(con);
 
-		if (console_is_usable(con, con_flags, false)) {
-			unsigned long lock_flags;
-
-			con->device_lock(con, &lock_flags);
-
-			/*
-			 * Ensure this stays on the CPU to make handover and
-			 * takeover possible.
-			 */
-			cant_migrate();
-
-			if (nbcon_context_try_acquire(ctxt)) {
-				/*
-				 * If the emit fails, this context is no
-				 * longer the owner.
-				 */
-				if (nbcon_emit_next_record(&wctxt, false)) {
-					nbcon_context_release(ctxt);
-					backlog = ctxt->backlog;
-				}
-			}
-
-			con->device_unlock(con, lock_flags);
-		}
+		if (console_is_usable(con, con_flags, false))
+			backlog = nbcon_emit_one(&wctxt, false);
 
 		console_srcu_read_unlock(cookie);
 
@@ -1367,6 +1363,13 @@ enum nbcon_prio nbcon_get_default_prio(void)
  *		both the console_lock and the SRCU read lock. Otherwise it
  *		is set to false.
  * @cookie:	The cookie from the SRCU read lock.
+ * @use_atomic:	Set true when called in an atomic or unknown context.
+ *		It affects which nbcon callback will be used: write_atomic()
+ *		or write_thread().
+ *
+ *		When false, the write_thread() callback is used and would be
+ *		called in a preemtible context unless disabled by the
+ *		device_lock. The legacy handover is not allowed in this mode.
 *
  * Context:	Any context except NMI.
  * Return:	True, when a record has been printed and there are still
@@ -1382,26 +1385,36 @@ enum nbcon_prio nbcon_get_default_prio(void)
  * Essentially it is the nbcon version of console_emit_next_record().
  */
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie)
+				   int cookie, bool use_atomic)
 {
 	struct nbcon_write_context wctxt = { };
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
 	unsigned long flags;
 	bool progress;
 
-	/* Use the same procedure as console_emit_next_record(). */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
-	stop_critical_timings();
-
 	ctxt->console	= con;
 	ctxt->prio	= nbcon_get_default_prio();
 
-	progress = nbcon_atomic_emit_one(&wctxt);
+	if (use_atomic) {
+		/*
+		 * In an atomic or unknown context, use the same procedure as
+		 * in console_emit_next_record(). It allows to handover.
+		 */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
+		stop_critical_timings();
+	}
 
-	start_critical_timings();
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+	progress = nbcon_emit_one(&wctxt, use_atomic);
+
+	if (use_atomic) {
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	} else {
+		/* Non-atomic does not perform legacy spinning handovers. */
+		*handover = false;
+	}
 
 	return progress;
 }
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f27c76c..55d75db 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3077,12 +3077,13 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 			u64 printk_seq;
 			bool progress;
 
-			if (!console_is_usable(con, flags, true))
+			if (!console_is_usable(con, flags, !do_cond_resched))
 				continue;
 			any_usable = true;
 
 			if (flags & CON_NBCON) {
-				progress = nbcon_legacy_emit_next_record(con, handover, cookie);
+				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+									 !do_cond_resched);
 				printk_seq = nbcon_seq_read(con);
 			} else {
 				progress = console_emit_next_record(con, handover, cookie);
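[ Illustration, not part of the commit above: the commit message's point about
  device_lock and preemption is easiest to see from the driver side. The sketch
  below is a hypothetical nbcon console; the names my_hw_lock, my_device_lock,
  my_device_unlock, my_write and my_console are invented, while the struct
  console hooks (write_thread(), write_atomic(), device_lock(), device_unlock())
  and the nbcon_enter_unsafe()/nbcon_exit_unsafe() helpers are the existing
  nbcon driver API this series builds on. Because device_lock() here is a
  mutex, the write_thread() path taken by nbcon_emit_one(wctxt, false) runs
  preemptibly; only migration is excluded via cant_migrate() in the printk
  core. With a spinlock-based device_lock (on a !PREEMPT_RT build), the same
  emit would run in one large preemption-disabled region, which is the cost
  the commit message wants to avoid where possible. ]

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical nbcon console -- illustration only, not from this patch. */
#include <linux/console.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_hw_lock);	/* serializes access to the slow device */

/* device_lock() backed by a mutex: taking it does not disable preemption. */
static void my_device_lock(struct console *con, unsigned long *flags)
{
	*flags = 0;			/* nothing to save for a mutex */
	mutex_lock(&my_hw_lock);
}

static void my_device_unlock(struct console *con, unsigned long flags)
{
	mutex_unlock(&my_hw_lock);
}

/*
 * Used as both write_thread() and write_atomic() here. When called via
 * write_thread() (task context, device_lock held) it may be preempted
 * while emitting; via write_atomic() it runs in a non-preemptible section.
 */
static void my_write(struct console *con, struct nbcon_write_context *wctxt)
{
	if (!nbcon_enter_unsafe(wctxt))
		return;

	/* Emit wctxt->len bytes from wctxt->outbuf to the hardware here. */

	nbcon_exit_unsafe(wctxt);
}

static struct console my_console = {
	.name		= "mycon",
	.flags		= CON_NBCON,
	.write_thread	= my_write,
	.write_atomic	= my_write,
	.device_lock	= my_device_lock,
	.device_unlock	= my_device_unlock,
};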