author     John Ogness <john.ogness@linutronix.de>    2024-09-04 14:11:28 +0206
committer  Petr Mladek <pmladek@suse.com>             2024-09-04 15:56:32 +0200
commit     13189fa73afae2b961d29132fd36d9cc0be77ecb
tree       ac48e44ca10e9e7801334b258b8648491f08e5c9   /kernel/printk/printk.c
parent     5c586baa60e46d9fccd04f04e16f8a50b2fbc1af
printk: nbcon: Rely on kthreads for normal operation
Once the kthread is running and available
(i.e. @printk_kthreads_running is set), the kthread becomes
responsible for flushing any pending messages which are added
in NBCON_PRIO_NORMAL context. Namely, the legacy
console_flush_all() and device_release() no longer flush the
console, and nbcon_atomic_flush_pending() used by
nbcon_cpu_emergency_exit() no longer flushes messages added
after the emergency messages.
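As a minimal userspace sketch (not kernel code) of the decision this
enables in vprintk_emit(): the struct fields and the names
nbcon_atomic_flush_pending() and nbcon_kthreads_wake() are taken from
the patch below; the stub bodies, emit_flush() and main() are
illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct console_flush_type {
	bool nbcon_atomic;	/* flush nbcon consoles directly in this context */
	bool nbcon_offload;	/* defer nbcon output to the printer kthreads */
	bool legacy_direct;	/* legacy consoles flushed via console_lock()/unlock() */
};

/* Stub standing in for the kernel's nbcon_atomic_flush_pending(). */
static void nbcon_atomic_flush_pending(void)
{
	puts("flushing pending records atomically");
}

/* Stub standing in for the kernel's nbcon_kthreads_wake(). */
static void nbcon_kthreads_wake(void)
{
	puts("waking the per-console printer kthreads");
}

/*
 * Mirrors the hunk this patch adds to vprintk_emit(): flush directly
 * when atomic printing is allowed, otherwise just wake the kthreads
 * and let them pick up the new records.
 */
static void emit_flush(const struct console_flush_type *ft)
{
	if (ft->nbcon_atomic)
		nbcon_atomic_flush_pending();

	if (ft->nbcon_offload)
		nbcon_kthreads_wake();
}

int main(void)
{
	/* Once @printk_kthreads_running is set, normal-priority output is
	 * offloaded rather than flushed in the caller's context. */
	struct console_flush_type ft = { .nbcon_offload = true };

	emit_flush(&ft);
	return 0;
}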
The console context is safe when used by the kthread only when
one of the following conditions is true:
1. Another caller acquires the console context with
NBCON_PRIO_NORMAL with preemption disabled. It will
release the context before rescheduling.
2. Another caller acquires the console context with
NBCON_PRIO_NORMAL under the device_lock.
3. The kthread is the only context which acquires the console
with NBCON_PRIO_NORMAL.
This is satisfied for all atomic printing call sites:
nbcon_legacy_emit_next_record() (#1)
nbcon_atomic_flush_pending_con() (#1)
nbcon_device_release() (#2)
It is even doubly guaranteed when @printk_kthreads_running
is set because then _only_ the kthread will print for
NBCON_PRIO_NORMAL. (#3)
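To illustrate guarantee #3, here is a small userspace model of the
check this patch adds to console_flush_all(). CON_NBCON and the
flush-type fields come from the patch; the flag value and the helper
name legacy_must_flush() are invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define CON_NBCON 0x1000	/* illustrative value, not the kernel's */

struct console_flush_type {
	bool nbcon_atomic;	/* nbcon consoles can be flushed atomically */
	bool nbcon_offload;	/* printer kthreads are running and usable */
};

/*
 * Models the new check in console_flush_all(): the legacy flush loop
 * only handles an nbcon console when neither its atomic nor its
 * threaded flushing is available.
 */
static bool legacy_must_flush(short flags, const struct console_flush_type *ft)
{
	if ((flags & CON_NBCON) && (ft->nbcon_atomic || ft->nbcon_offload))
		return false;	/* the kthread (or atomic path) owns it */
	return true;
}

int main(void)
{
	struct console_flush_type ft = { .nbcon_offload = true };

	/* With the kthreads running, nbcon consoles are skipped here ... */
	printf("nbcon console handled by legacy loop: %d\n",
	       legacy_must_flush(CON_NBCON, &ft));
	/* ... while legacy consoles are still flushed by console_flush_all(). */
	printf("legacy console handled by legacy loop: %d\n",
	       legacy_must_flush(0, &ft));
	return 0;
}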
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20240904120536.115780-10-john.ogness@linutronix.de
Signed-off-by: Petr Mladek <pmladek@suse.com>
Diffstat (limited to 'kernel/printk/printk.c')
-rw-r--r--   kernel/printk/printk.c   48
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 55d75db00042..149c3e04c2b5 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2384,6 +2384,9 @@ asmlinkage int vprintk_emit(int facility, int level,
 	if (ft.nbcon_atomic)
 		nbcon_atomic_flush_pending();
 
+	if (ft.nbcon_offload)
+		nbcon_kthreads_wake();
+
 	if (ft.legacy_direct) {
 		/*
 		 * The caller may be holding system-critical or
@@ -2732,6 +2735,7 @@ void suspend_console(void)
 
 void resume_console(void)
 {
+	struct console_flush_type ft;
 	struct console *con;
 
 	if (!console_suspend_enabled)
@@ -2749,6 +2753,10 @@ void resume_console(void)
 	 */
 	synchronize_srcu(&console_srcu);
 
+	printk_get_console_flush_type(&ft);
+	if (ft.nbcon_offload)
+		nbcon_kthreads_wake();
+
 	pr_flush(1000, true);
 }
 
@@ -3060,6 +3068,7 @@ static inline void printk_kthreads_check_locked(void) { }
  */
 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
 {
+	struct console_flush_type ft;
 	bool any_usable = false;
 	struct console *con;
 	bool any_progress;
@@ -3071,12 +3080,22 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
 	do {
 		any_progress = false;
 
+		printk_get_console_flush_type(&ft);
+
 		cookie = console_srcu_read_lock();
 		for_each_console_srcu(con) {
 			short flags = console_srcu_read_flags(con);
 			u64 printk_seq;
 			bool progress;
 
+			/*
+			 * console_flush_all() is only responsible for nbcon
+			 * consoles when the nbcon consoles cannot print via
+			 * their atomic or threaded flushing.
+			 */
+			if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+				continue;
+
 			if (!console_is_usable(con, flags, !do_cond_resched))
 				continue;
 			any_usable = true;
@@ -3387,9 +3406,25 @@ EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
+	struct console_flush_type ft;
+	bool is_nbcon;
+
 	console_list_lock();
 	console_srcu_write_flags(console, console->flags | CON_ENABLED);
+	is_nbcon = console->flags & CON_NBCON;
 	console_list_unlock();
+
+	/*
+	 * Ensure that all SRCU list walks have completed. The related
+	 * printing context must be able to see it is enabled so that
+	 * it is guaranteed to wake up and resume printing.
+	 */
+	synchronize_srcu(&console_srcu);
+
+	printk_get_console_flush_type(&ft);
+	if (is_nbcon && ft.nbcon_offload)
+		nbcon_kthread_wake(console);
+
 	__pr_flush(console, 1000, true);
 }
 EXPORT_SYMBOL(console_start);
@@ -4115,6 +4150,8 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
 
 	/* Flush the consoles so that records up to @seq are printed. */
 	printk_get_console_flush_type(&ft);
+	if (ft.nbcon_atomic)
+		nbcon_atomic_flush_pending();
 	if (ft.legacy_direct) {
 		console_lock();
 		console_unlock();
@@ -4152,8 +4189,10 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
 			 * that they make forward progress, so only increment
 			 * @diff for usable consoles.
 			 */
-			if (!console_is_usable(c, flags, true))
+			if (!console_is_usable(c, flags, true) &&
+			    !console_is_usable(c, flags, false)) {
 				continue;
+			}
 
 			if (flags & CON_NBCON) {
 				printk_seq = nbcon_seq_read(c);
@@ -4629,8 +4668,15 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
  */
 void console_try_replay_all(void)
 {
+	struct console_flush_type ft;
+
+	printk_get_console_flush_type(&ft);
 	if (console_trylock()) {
 		__console_rewind_all();
+		if (ft.nbcon_atomic)
+			nbcon_atomic_flush_pending();
+		if (ft.nbcon_offload)
+			nbcon_kthreads_wake();
 		/* Consoles are flushed as part of console_unlock(). */
 		console_unlock();
 	}