-rw-r--r--  Documentation/core-api/index.rst         |    1
-rw-r--r--  Documentation/core-api/printk-index.rst  |  137
-rw-r--r--  MAINTAINERS                              |    2
-rw-r--r--  drivers/tty/sysrq.c                      |    2
-rw-r--r--  include/linux/console.h                  |   19
-rw-r--r--  include/linux/printk.h                   |   82
-rw-r--r--  kernel/hung_task.c                       |   11
-rw-r--r--  kernel/panic.c                           |    4
-rw-r--r--  kernel/printk/printk.c                   | 1205
-rw-r--r--  kernel/rcu/tree_stall.h                  |    2
-rw-r--r--  kernel/reboot.c                          |   14
-rw-r--r--  kernel/watchdog.c                        |    4
-rw-r--r--  kernel/watchdog_hld.c                    |    4
-rw-r--r--  lib/dump_stack.c                         |    4
-rw-r--r--  lib/nmi_backtrace.c                      |    4
15 files changed, 1174 insertions, 321 deletions
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 972d46a5ddf6..8a3fce1fe582 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -20,6 +20,7 @@ it.
workqueue
printk-basics
printk-formats
+ printk-index
symbol-namespaces
Data structures and low-level utilities
diff --git a/Documentation/core-api/printk-index.rst b/Documentation/core-api/printk-index.rst
new file mode 100644
index 000000000000..3062f37d119b
--- /dev/null
+++ b/Documentation/core-api/printk-index.rst
@@ -0,0 +1,137 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+Printk Index
+============
+
+There are many ways to monitor the state of the system. One important
+source of information is the system log. It provides a lot of information,
+including more or less important warnings and error messages.
+
+There are monitoring tools that filter and take action based on messages
+logged.
+
+The kernel messages evolve together with the code. As a result,
+particular kernel messages are not KABI and never will be!
+
+This is a huge challenge for maintaining system log monitors. It requires
+knowing what messages were updated in a particular kernel version and why.
+Finding these changes in the sources would require non-trivial parsers.
+It would also require matching the sources with the binary kernel, which
+is not always trivial. Various changes might be backported. Various kernel
+versions might be used on different monitored systems.
+
+This is where the printk index feature might become useful. It provides
+a dump of the printk formats used across the source code of the kernel
+and modules on the running system. It is accessible at runtime via debugfs.
+
+The printk index helps to find changes in the message formats. It also
+helps to track the strings back to the kernel sources and the related commit.
+
+
+User Interface
+==============
+
+The index of printk formats is split into separate files. The files are
+named after the binaries that contain the printk formats. There
+is always "vmlinux" and optionally also modules, for example::
+
+ /sys/kernel/debug/printk/index/vmlinux
+ /sys/kernel/debug/printk/index/ext4
+ /sys/kernel/debug/printk/index/scsi_mod
+
+Note that only loaded modules are shown. Also printk formats from a module
+might appear in "vmlinux" when the module is built-in.
+
+The content is inspired by the dynamic debug interface and looks like::
+
+ $> head -1 /sys/kernel/debug/printk/index/vmlinux; shuf -n 5 vmlinux
+ # <level[,flags]> filename:line function "format"
+ <5> block/blk-settings.c:661 disk_stack_limits "%s: Warning: Device %s is misaligned\n"
+ <4> kernel/trace/trace.c:8296 trace_create_file "Could not create tracefs '%s' entry\n"
+ <6> arch/x86/kernel/hpet.c:144 _hpet_print_config "hpet: %s(%d):\n"
+ <6> init/do_mounts.c:605 prepare_namespace "Waiting for root device %s...\n"
+ <6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: auto-serialization disabled\n"
+
+where the fields have the following meaning:
+
+  - :level: log level value: 0-7 for particular severity, -1 as default,
+       'c' as a continuation line without an explicit log level
+ - :flags: optional flags: currently only 'c' for KERN_CONT
+ - :filename\:line: source filename and line number of the related
+ printk() call. Note that there are many wrappers, for example,
+ pr_warn(), pr_warn_once(), dev_warn().
+ - :function: function name where the printk() call is used.
+ - :format: format string
+
+The extra information makes it a bit harder to find differences
+between various kernels. In particular, the line number might change
+very often. On the other hand, it helps a lot to confirm that
+it is the same string or to find the commit responsible
+for any changes.
+
+
+printk() Is Not a Stable KABI
+=============================
+
+Several developers are afraid that exporting all these implementation
+details into user space will transform particular printk() calls
+into KABI.
+
+But it is exactly the opposite. printk() calls must _not_ be KABI.
+And the printk index helps user space tools to deal with this.
+
+
+Subsystem specific printk wrappers
+==================================
+
+The printk index is generated using extra metadata that are stored in
+a dedicated ELF section ".printk_index". It is achieved using macro
+wrappers that call __printk_index_emit() together with the real printk()
+call. The same technique is also used for the metadata of
+the dynamic debug feature.
+
+The metadata are stored for a particular message only when it is printed
+using these special wrappers. They are implemented for the commonly
+used printk() calls, including, for example, pr_warn() or pr_once().
+
+Additional changes are necessary for various subsystem specific wrappers
+that call the original printk() via a common helper function. These need
+their own wrappers that add __printk_index_emit().
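+
+As an illustration only (the names are made up and the real kernel macros
+differ in details), such a wrapper might pair the metadata emission with
+the call of the common helper like this::
+
+  /* Illustrative sketch: foo_printk() and _foo_printk() are not real kernel APIs. */
+  #define foo_printk(fmt, ...)                          \
+  ({                                                    \
+          __printk_index_emit(fmt, NULL, NULL);         \
+          _foo_printk(fmt, ##__VA_ARGS__);              \
+  })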
+
+Only a few subsystem specific wrappers have been updated so far,
+for example, dev_printk(). As a result, the printk formats from
+some subsystems can be missing from the printk index.
+
+
+Subsystem specific prefix
+=========================
+
+The pr_fmt() macro allows defining a prefix that is printed
+before the string generated by the related printk() calls.
+
+Subsystem specific wrappers usually add even more complicated
+prefixes.
+
+These prefixes can be stored in the printk index metadata
+via an optional parameter of __printk_index_emit(). The debugfs
+interface might then show the printk formats including these prefixes.
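+
+As a rough sketch with made-up names (not the exact kernel macros), a
+subsystem wrapper could pass its prefix as this optional argument::
+
+  /* Illustrative only: foo_printk_index_emit() is not a real kernel macro. */
+  #define foo_printk_index_emit(level, fmt, ...)        \
+          __printk_index_emit(fmt, level, "foo: ")
+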
+For example, drivers/acpi/osl.c contains::
+
+ #define pr_fmt(fmt) "ACPI: OSL: " fmt
+
+ static int __init acpi_no_auto_serialize_setup(char *str)
+ {
+ acpi_gbl_auto_serialize_methods = FALSE;
+ pr_info("Auto-serialization disabled\n");
+
+ return 1;
+ }
+
+This results in the following printk index entry::
+
+ <6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: OSL: Auto-serialization disabled\n"
+
+It helps to match messages from the real log with the printk index.
+Then the source file name, line number, and function name can
+be used to match the string with the source code.
diff --git a/MAINTAINERS b/MAINTAINERS
index 7af43b32e06d..309b45217022 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15923,7 +15923,9 @@ F: kernel/printk/
PRINTK INDEXING
R: Chris Down <chris@chrisdown.name>
S: Maintained
+F: Documentation/core-api/printk-index.rst
F: kernel/printk/index.c
+K: printk_index
PROC FILESYSTEM
L: linux-kernel@vger.kernel.org
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index bbfd004449b5..2884cd638d64 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -578,6 +578,7 @@ void __handle_sysrq(int key, bool check_mask)
rcu_sysrq_start();
rcu_read_lock();
+ printk_prefer_direct_enter();
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -619,6 +620,7 @@ void __handle_sysrq(int key, bool check_mask)
pr_cont("\n");
console_loglevel = orig_log_level;
}
+ printk_prefer_direct_exit();
rcu_read_unlock();
rcu_sysrq_end();
diff --git a/include/linux/console.h b/include/linux/console.h
index 7cd758a4f44e..143653090c48 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -16,6 +16,7 @@
#include <linux/atomic.h>
#include <linux/types.h>
+#include <linux/mutex.h>
struct vc_data;
struct console_font_op;
@@ -151,6 +152,24 @@ struct console {
int cflag;
uint ispeed;
uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ struct task_struct *thread;
+ bool blocked;
+
+ /*
+ * The per-console lock is used by printing kthreads to synchronize
+ * this console with callers of console_lock(). This is necessary in
+ * order to allow printing kthreads to run in parallel to each other,
+ * while each safely accessing the @blocked field and synchronizing
+ * against direct printing via console_lock/console_unlock.
+ *
+ * Note: For synchronizing against direct printing via
+ * console_trylock/console_unlock, see the static global
+ * variable @console_kthreads_active.
+ */
+ struct mutex lock;
+
void *data;
struct console *next;
};
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 1522df223c0f..cd26aab0ab2a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -170,6 +170,11 @@ extern void __printk_safe_exit(void);
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit
+extern void printk_prefer_direct_enter(void);
+extern void printk_prefer_direct_exit(void);
+
+extern bool pr_flush(int timeout_ms, bool reset_on_progress);
+
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
@@ -220,6 +225,19 @@ static inline void printk_deferred_exit(void)
{
}
+static inline void printk_prefer_direct_enter(void)
+{
+}
+
+static inline void printk_prefer_direct_exit(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+ return true;
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -277,45 +295,57 @@ static inline void printk_trigger_flush(void)
#endif
#ifdef CONFIG_SMP
-extern int __printk_cpu_trylock(void);
-extern void __printk_wait_on_cpu_lock(void);
-extern void __printk_cpu_unlock(void);
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */
/**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
- * lock and disable interrupts.
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ * cpu-reentrant spinning lock.
* @flags: Stack-allocated storage for saving local interrupt state,
- * to be passed to printk_cpu_unlock_irqrestore().
+ * to be passed to printk_cpu_sync_put_irqrestore().
*
* If the lock is owned by another CPU, spin until it becomes available.
* Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ * * This function is reentrant on the same CPU. Therefore the calling
+ * code must not assume exclusive access to data if code accessing the
+ * data can run reentrant or within NMI context on the same CPU.
+ *
+ * * If there exists usage of this function from NMI context, it becomes
+ * unsafe to perform any type of locking or spinning to wait for other
+ * CPUs after calling this function from any context. This includes
+ * using spinlocks or any other busy-waiting synchronization methods.
*/
-#define printk_cpu_lock_irqsave(flags) \
- for (;;) { \
- local_irq_save(flags); \
- if (__printk_cpu_trylock()) \
- break; \
- local_irq_restore(flags); \
- __printk_wait_on_cpu_lock(); \
+#define printk_cpu_sync_get_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_sync_try_get()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_cpu_sync_wait(); \
}
/**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
- * lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
*/
-#define printk_cpu_unlock_irqrestore(flags) \
+#define printk_cpu_sync_put_irqrestore(flags) \
do { \
- __printk_cpu_unlock(); \
+ __printk_cpu_sync_put(); \
local_irq_restore(flags); \
- } while (0) \
-
-#else
-
-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
-
-#endif /* CONFIG_SMP */
+ } while (0)
extern int kptr_restrict;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 52501e5f7655..02a65d554340 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* complain:
*/
if (sysctl_hung_task_warnings) {
+ printk_prefer_direct_enter();
+
if (sysctl_hung_task_warnings > 0)
sysctl_hung_task_warnings--;
pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
if (sysctl_hung_task_all_cpu_backtrace)
hung_task_show_all_bt = true;
+
+ printk_prefer_direct_exit();
}
touch_nmi_watchdog();
@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
}
unlock:
rcu_read_unlock();
- if (hung_task_show_lock)
+ if (hung_task_show_lock) {
+ printk_prefer_direct_enter();
debug_show_all_locks();
+ printk_prefer_direct_exit();
+ }
if (hung_task_show_all_bt) {
hung_task_show_all_bt = false;
+ printk_prefer_direct_enter();
trigger_all_cpu_backtrace();
+ printk_prefer_direct_exit();
}
if (hung_task_call_panic)
diff --git a/kernel/panic.c b/kernel/panic.c
index 8355b19676f8..6737b2332275 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -579,6 +579,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
+ printk_prefer_direct_enter();
+
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -608,6 +610,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
+
+ printk_prefer_direct_exit();
}
#ifndef __WARN_FLAGS
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index da03c15ecc89..a3e1035929b0 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -224,6 +224,33 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
static int nr_ext_console_drivers;
/*
+ * Used to synchronize printing kthreads against direct printing via
+ * console_trylock/console_unlock.
+ *
+ * Values:
+ * -1 = console kthreads atomically blocked (via global trylock)
+ * 0 = no kthread printing, console not locked (via trylock)
+ * >0 = kthread(s) actively printing
+ *
+ * Note: For synchronizing against direct printing via
+ * console_lock/console_unlock, see the @lock variable in
+ * struct console.
+ */
+static atomic_t console_kthreads_active = ATOMIC_INIT(0);
+
+#define console_kthreads_atomic_tryblock() \
+ (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
+#define console_kthreads_atomic_unblock() \
+ atomic_cmpxchg(&console_kthreads_active, -1, 0)
+#define console_kthreads_atomically_blocked() \
+ (atomic_read(&console_kthreads_active) == -1)
+
+#define console_kthread_printing_tryenter() \
+ atomic_inc_unless_negative(&console_kthreads_active)
+#define console_kthread_printing_exit() \
+ atomic_dec(&console_kthreads_active)
+
+/*
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
*/
@@ -271,19 +298,49 @@ static bool panic_in_progress(void)
}
/*
- * This is used for debugging the mess that is the VT code by
- * keeping track if we have the console semaphore held. It's
- * definitely not the perfect debug tool (we don't know if _WE_
- * hold it and are racing, but it helps tracking those weird code
- * paths in the console code where we end up in places I want
- * locked without the console semaphore held).
+ * Tracks whether kthread printers are all blocked. A value of true implies
+ * that the console is locked via console_lock() or the console is suspended.
+ * Writing to this variable requires holding @console_sem.
*/
-static int console_locked, console_suspended;
+static bool console_kthreads_blocked;
+
+/*
+ * Block all kthread printers from a schedulable context.
+ *
+ * Requires holding @console_sem.
+ */
+static void console_kthreads_block(void)
+{
+ struct console *con;
+
+ for_each_console(con) {
+ mutex_lock(&con->lock);
+ con->blocked = true;
+ mutex_unlock(&con->lock);
+ }
+
+ console_kthreads_blocked = true;
+}
/*
- * If exclusive_console is non-NULL then only this console is to be printed to.
+ * Unblock all kthread printers from a schedulable context.
+ *
+ * Requires holding @console_sem.
*/
-static struct console *exclusive_console;
+static void console_kthreads_unblock(void)
+{
+ struct console *con;
+
+ for_each_console(con) {
+ mutex_lock(&con->lock);
+ con->blocked = false;
+ mutex_unlock(&con->lock);
+ }
+
+ console_kthreads_blocked = false;
+}
+
+static int console_suspended;
/*
* Array of consoles built from command line options (console=)
@@ -366,7 +423,75 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
+/*
+ * A flag to signify if printk_activate_kthreads() has already started the
+ * kthread printers. If true, any later registered consoles must start their
+ * own kthread directly. The flag is write protected by the console_lock.
+ */
+static bool printk_kthreads_available;
+
#ifdef CONFIG_PRINTK
+static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
+
+/**
+ * printk_prefer_direct_enter - cause printk() calls to attempt direct
+ * printing to all enabled consoles
+ *
+ * Since it is not possible to call into the console printing code from any
+ * context, there is no guarantee that direct printing will occur.
+ *
+ * This globally affects all printk() callers.
+ *
+ * Context: Any context.
+ */
+void printk_prefer_direct_enter(void)
+{
+ atomic_inc(&printk_prefer_direct);
+}
+
+/**
+ * printk_prefer_direct_exit - restore printk() behavior
+ *
+ * Context: Any context.
+ */
+void printk_prefer_direct_exit(void)
+{
+ WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
+}
+
+/*
+ * Calling printk() always wakes kthread printers so that they can
+ * flush the new message to their respective consoles. Also, if direct
+ * printing is allowed, printk() tries to flush the messages directly.
+ *
+ * Direct printing is allowed in situations when the kthreads
+ * are not available or the system is in a problematic state.
+ *
+ * See the implementation about possible races.
+ */
+static inline bool allow_direct_printing(void)
+{
+ /*
+ * Checking kthread availability is a possible race because the
+ * kthread printers can become permanently disabled during runtime.
+ * However, doing that requires holding the console_lock, so any
+ * pending messages will be direct printed by console_unlock().
+ */
+ if (!printk_kthreads_available)
+ return true;
+
+ /*
+ * Prefer direct printing when the system is in a problematic state.
+ * The context that sets this state will always see the updated value.
+ * The other contexts do not care. Anyway, direct printing is just a
+ * best effort. The direct output is only possible when console_lock
+ * is not already taken and no kthread printers are actively printing.
+ */
+ return (system_state > SYSTEM_RUNNING ||
+ oops_in_progress ||
+ atomic_read(&printk_prefer_direct));
+}
+
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -374,12 +499,6 @@ static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
-/* All 3 protected by @console_sem. */
-/* the next printk record to write to the console */
-static u64 console_seq;
-static u64 exclusive_console_stop_seq;
-static unsigned long console_dropped;
-
struct latched_seq {
seqcount_latch_t latch;
u64 val[2];
@@ -405,6 +524,9 @@ static struct latched_seq clear_seq = {
/* the maximum size of a formatted record (i.e. with prefix added per line) */
#define CONSOLE_LOG_MAX 1024
+/* the maximum size for a dropped text message */
+#define DROPPED_TEXT_MAX 64
+
/* the maximum size allowed to be reserved for a record */
#define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
@@ -746,8 +868,19 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
goto out;
}
+ /*
+ * Guarantee this task is visible on the waitqueue before
+ * checking the wake condition.
+ *
+ * The full memory barrier within set_current_state() of
+ * prepare_to_wait_event() pairs with the full memory barrier
+ * within wq_has_sleeper().
+ *
+ * This pairs with __wake_up_klogd:A.
+ */
ret = wait_event_interruptible(log_wait,
- prb_read_valid(prb, atomic64_read(&user->seq), r));
+ prb_read_valid(prb,
+ atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */
if (ret)
goto out;
}
@@ -1513,7 +1646,18 @@ static int syslog_print(char __user *buf, int size)
seq = syslog_seq;
mutex_unlock(&syslog_lock);
- len = wait_event_interruptible(log_wait, prb_read_valid(prb, seq, NULL));
+ /*
+ * Guarantee this task is visible on the waitqueue before
+ * checking the wake condition.
+ *
+ * The full memory barrier within set_current_state() of
+ * prepare_to_wait_event() pairs with the full memory barrier
+ * within wq_has_sleeper().
+ *
+ * This pairs with __wake_up_klogd:A.
+ */
+ len = wait_event_interruptible(log_wait,
+ prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
mutex_lock(&syslog_lock);
if (len)
@@ -1911,47 +2055,24 @@ static int console_trylock_spinning(void)
}
/*
- * Call the console drivers, asking them to write out
- * log_buf[start] to log_buf[end - 1].
- * The console_lock must be held.
+ * Call the specified console driver, asking it to write out the specified
+ * text and length. If @dropped_text is non-NULL and any records have been
+ * dropped, a dropped message will be written out first.
*/
-static void call_console_drivers(const char *ext_text, size_t ext_len,
- const char *text, size_t len)
+static void call_console_driver(struct console *con, const char *text, size_t len,
+ char *dropped_text)
{
- static char dropped_text[64];
- size_t dropped_len = 0;
- struct console *con;
-
- trace_console_rcuidle(text, len);
+ size_t dropped_len;
- if (!console_drivers)
- return;
-
- if (console_dropped) {
- dropped_len = snprintf(dropped_text, sizeof(dropped_text),
+ if (con->dropped && dropped_text) {
+ dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
"** %lu printk messages dropped **\n",
- console_dropped);
- console_dropped = 0;
+ con->dropped);
+ con->dropped = 0;
+ con->write(con, dropped_text, dropped_len);
}
- for_each_console(con) {
- if (exclusive_console && con != exclusive_console)
- continue;
- if (!(con->flags & CON_ENABLED))
- continue;
- if (!con->write)
- continue;
- if (!cpu_online(smp_processor_id()) &&
- !(con->flags & CON_ANYTIME))
- continue;
- if (con->flags & CON_EXTENDED)
- con->write(con, ext_text, ext_len);
- else {
- if (dropped_len)
- con->write(con, dropped_text, dropped_len);
- con->write(con, text, len);
- }
- }
+ con->write(con, text, len);
}
/*
@@ -2026,8 +2147,10 @@ static u8 *__printk_recursion_counter(void)
int printk_delay_msec __read_mostly;
-static inline void printk_delay(void)
+static inline void printk_delay(int level)
{
+ boot_delay_msec(level);
+
if (unlikely(printk_delay_msec)) {
int m = printk_delay_msec;
@@ -2041,7 +2164,7 @@ static inline void printk_delay(void)
static inline u32 printk_caller_id(void)
{
return in_task() ? task_pid_nr(current) :
- 0x80000000 + raw_smp_processor_id();
+ 0x80000000 + smp_processor_id();
}
/**
@@ -2115,6 +2238,8 @@ static u16 printk_sprint(char *text, u16 size, int facility,
}
}
+ trace_console_rcuidle(text, text_len);
+
return text_len;
}
@@ -2123,7 +2248,6 @@ int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
- const u32 caller_id = printk_caller_id();
struct prb_reserved_entry e;
enum printk_info_flags flags = 0;
struct printk_record r;
@@ -2133,10 +2257,14 @@ int vprintk_store(int facility, int level,
u8 *recursion_ptr;
u16 reserve_size;
va_list args2;
+ u32 caller_id;
u16 text_len;
int ret = 0;
u64 ts_nsec;
+ if (!printk_enter_irqsave(recursion_ptr, irqflags))
+ return 0;
+
/*
* Since the duration of printk() can vary depending on the message
* and state of the ringbuffer, grab the timestamp now so that it is
@@ -2145,8 +2273,7 @@ int vprintk_store(int facility, int level,
*/
ts_nsec = local_clock();
- if (!printk_enter_irqsave(recursion_ptr, irqflags))
- return 0;
+ caller_id = printk_caller_id();
/*
* The sprintf needs to come first since the syslog prefix might be
@@ -2250,23 +2377,25 @@ asmlinkage int vprintk_emit(int facility, int level,
in_sched = true;
}
- boot_delay_msec(level);
- printk_delay();
+ printk_delay(level);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
/* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
+ if (!in_sched && allow_direct_printing()) {
/*
- * Disable preemption to avoid being preempted while holding
- * console_sem which would prevent anyone from printing to
- * console
+ * The caller may be holding system-critical or
+ * timing-sensitive locks. Disable preemption during direct
+ * printing of all remaining records to all consoles so that
+ * this context can return as soon as possible. Hopefully
+ * another printk() caller will take over the printing.
*/
preempt_disable();
/*
* Try to acquire and then immediately release the console
- * semaphore. The release will print out buffers and wake up
- * /dev/kmsg and syslog() users.
+ * semaphore. The release will print out buffers. With the
+ * spinning variant, this context tries to take over the
+ * printing from another printing context.
*/
if (console_trylock_spinning())
console_unlock();
@@ -2297,18 +2426,21 @@ asmlinkage __visible int _printk(const char *fmt, ...)
}
EXPORT_SYMBOL(_printk);
+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+
+static void printk_start_kthread(struct console *con);
+
#else /* CONFIG_PRINTK */
#define CONSOLE_LOG_MAX 0
+#define DROPPED_TEXT_MAX 0
#define printk_time false
#define prb_read_valid(rb, seq, r) false
#define prb_first_valid_seq(rb) 0
+#define prb_next_seq(rb) 0
static u64 syslog_seq;
-static u64 console_seq;
-static u64 exclusive_console_stop_seq;
-static unsigned long console_dropped;
static size_t record_print_text(const struct printk_record *r,
bool syslog, bool time)
@@ -2325,9 +2457,14 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
struct dev_printk_info *dev_info) { return 0; }
static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
-static void call_console_drivers(const char *ext_text, size_t ext_len,
- const char *text, size_t len) {}
+static void call_console_driver(struct console *con, const char *text, size_t len,
+ char *dropped_text)
+{
+}
static bool suppress_message_printing(int level) { return false; }
+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+static void printk_start_kthread(struct console *con) { }
+static bool allow_direct_printing(void) { return true; }
#endif /* CONFIG_PRINTK */
@@ -2515,6 +2652,7 @@ void suspend_console(void)
if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
+ pr_flush(1000, true);
console_lock();
console_suspended = 1;
up_console_sem();
@@ -2527,6 +2665,7 @@ void resume_console(void)
down_console_sem();
console_suspended = 0;
console_unlock();
+ pr_flush(1000, true);
}
/**
@@ -2544,6 +2683,14 @@ static int console_cpu_notify(unsigned int cpu)
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
+ else {
+ /*
+ * If a new CPU comes online, the conditions for
+ * printer_should_wake() may have changed for some
+ * kthread printer with !CON_ANYTIME.
+ */
+ wake_up_klogd();
+ }
}
return 0;
}
@@ -2563,7 +2710,7 @@ void console_lock(void)
down_console_sem();
if (console_suspended)
return;
- console_locked = 1;
+ console_kthreads_block();
console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);
@@ -2584,33 +2731,32 @@ int console_trylock(void)
up_console_sem();
return 0;
}
- console_locked = 1;
+ if (!console_kthreads_atomic_tryblock()) {
+ up_console_sem();
+ return 0;
+ }
console_may_schedule = 0;
return 1;
}
EXPORT_SYMBOL(console_trylock);
-int is_console_locked(void)
-{
- return console_locked;
-}
-EXPORT_SYMBOL(is_console_locked);
-
/*
- * Check if we have any console that is capable of printing while cpu is
- * booting or shutting down. Requires console_sem.
+ * This is used to help to make sure that certain paths within the VT code are
+ * running with the console lock held. It is definitely not the perfect debug
+ * tool (it is not known if the VT code is the task holding the console lock),
+ * but it helps tracking those weird code paths in the console code such as
+ * when the console is suspended: where the console is not locked but no
+ * console printing may occur.
+ *
+ * Note: This returns true when the console is suspended but is not locked.
+ * This is intentional because the VT code must consider that situation
+ * the same as if the console was locked.
*/
-static int have_callable_console(void)
+int is_console_locked(void)
{
- struct console *con;
-
- for_each_console(con)
- if ((con->flags & CON_ENABLED) &&
- (con->flags & CON_ANYTIME))
- return 1;
-
- return 0;
+ return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
}
+EXPORT_SYMBOL(is_console_locked);
/*
* Return true when this CPU should unlock console_sem without pushing all
@@ -2631,132 +2777,125 @@ static bool abandon_console_lock_in_panic(void)
return atomic_read(&panic_cpu) != raw_smp_processor_id();
}
+static inline bool __console_is_usable(short flags)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) &&
+ !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
/*
- * Can we actually use the console at this time on this cpu?
+ * Check if the given console is currently capable and allowed to print
+ * records.
*
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
+ * Requires holding the console_lock.
*/
-static inline int can_use_console(void)
+static inline bool console_is_usable(struct console *con)
{
- return cpu_online(raw_smp_processor_id()) || have_callable_console();
+ if (!con->write)
+ return false;
+
+ return __console_is_usable(con->flags);
}
-/**
- * console_unlock - unlock the console system
+static void __console_unlock(void)
+{
+ /*
+ * Depending on whether console_lock() or console_trylock() was used,
+ * appropriately allow the kthread printers to continue.
+ */
+ if (console_kthreads_blocked)
+ console_kthreads_unblock();
+ else
+ console_kthreads_atomic_unblock();
+
+ /*
+ * New records may have arrived while the console was locked.
+ * Wake the kthread printers to print them.
+ */
+ wake_up_klogd();
+
+ up_console_sem();
+}
+
+/*
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
*
- * Releases the console_lock which the caller holds on the console system
- * and the console driver list.
+ * @text is a buffer of size CONSOLE_LOG_MAX.
*
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
+ * If extended messages should be printed, @ext_text is a buffer of size
+ * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
*
- * If there is output waiting, we wake /dev/kmsg and syslog() users.
+ * If dropped messages should be printed, @dropped_text is a buffer of size
+ * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
*
- * console_unlock(); may be called from any context.
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false. A NULL pointer may be provided
+ * to disable allowing the console_lock to be taken over by a printk waiter.
+ *
+ * Returns false if the given console has no next record to print, otherwise
+ * true.
+ *
+ * Requires the console_lock if @handover is non-NULL.
+ * Requires con->lock otherwise.
*/
-void console_unlock(void)
+static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
+ char *dropped_text, bool *handover)
{
- static char ext_text[CONSOLE_EXT_LOG_MAX];
- static char text[CONSOLE_LOG_MAX];
- static int panic_console_dropped;
- unsigned long flags;
- bool do_cond_resched, retry;
+ static atomic_t panic_console_dropped = ATOMIC_INIT(0);
struct printk_info info;
struct printk_record r;
- u64 __maybe_unused next_seq;
-
- if (console_suspended) {
- up_console_sem();
- return;
- }
-
- prb_rec_init_rd(&r, &info, text, sizeof(text));
-
- /*
- * Console drivers are called with interrupts disabled, so
- * @console_may_schedule should be cleared before; however, we may
- * end up dumping a lot of lines, for example, if called from
- * console registration path, and should invoke cond_resched()
- * between lines if allowable. Not doing so can cause a very long
- * scheduling stall on a slow console leading to RCU stall and
- * softlockup warnings which exacerbate the issue with more
- * messages practically incapacitating the system.
- *
- * console_trylock() is not able to detect the preemptive
- * context reliably. Therefore the value must be stored before
- * and cleared after the "again" goto label.
- */
- do_cond_resched = console_may_schedule;
-again:
- console_may_schedule = 0;
-
- /*
- * We released the console_sem lock, so we need to recheck if
- * cpu is online and (if not) is there at least one CON_ANYTIME
- * console.
- */
- if (!can_use_console()) {
- console_locked = 0;
- up_console_sem();
- return;
- }
+ unsigned long flags;
+ char *write_text;
+ size_t len;
- for (;;) {
- size_t ext_len = 0;
- int handover;
- size_t len;
+ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
-skip:
- if (!prb_read_valid(prb, console_seq, &r))
- break;
+ if (handover)
+ *handover = false;
- if (console_seq != r.info->seq) {
- console_dropped += r.info->seq - console_seq;
- console_seq = r.info->seq;
- if (panic_in_progress() && panic_console_dropped++ > 10) {
- suppress_panic_printk = 1;
- pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
- }
- }
+ if (!prb_read_valid(prb, con->seq, &r))
+ return false;
- if (suppress_message_printing(r.info->level)) {
- /*
- * Skip record we have buffered and already printed
- * directly to the console when we received it, and
- * record that has level above the console loglevel.
- */
- console_seq++;
- goto skip;
+ if (con->seq != r.info->seq) {
+ con->dropped += r.info->seq - con->seq;
+ con->seq = r.info->seq;
+ if (panic_in_progress() &&
+ atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
+ suppress_panic_printk = 1;
+ pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
}
+ }
- /* Output to all consoles once old messages replayed. */
- if (unlikely(exclusive_console &&
- console_seq >= exclusive_console_stop_seq)) {
- exclusive_console = NULL;
- }
+ /* Skip record that has level above the console loglevel. */
+ if (suppress_message_printing(r.info->level)) {
+ con->seq++;
+ goto skip;
+ }
- /*
- * Handle extended console text first because later
- * record_print_text() will modify the record buffer in-place.
- */
- if (nr_ext_console_drivers) {
- ext_len = info_print_ext_header(ext_text,
- sizeof(ext_text),
- r.info);
- ext_len += msg_print_ext_body(ext_text + ext_len,
- sizeof(ext_text) - ext_len,
- &r.text_buf[0],
- r.info->text_len,
- &r.info->dev_info);
- }
- len = record_print_text(&r,
- console_msg_format & MSG_FORMAT_SYSLOG,
- printk_time);
- console_seq++;
+ if (ext_text) {
+ write_text = ext_text;
+ len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
+ len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
+ &r.text_buf[0], r.info->text_len, &r.info->dev_info);
+ } else {
+ write_text = text;
+ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
+ }
+ if (handover) {
/*
* While actively printing out messages, if another printk()
* were to occur on another CPU, it may wait for this one to
@@ -2770,38 +2909,187 @@ skip:
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
- stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(ext_text, ext_len, text, len);
- start_critical_timings();
+ /* don't trace irqsoff print latency */
+ stop_critical_timings();
+ }
- handover = console_lock_spinning_disable_and_check();
- printk_safe_exit_irqrestore(flags);
- if (handover)
- return;
+ call_console_driver(con, write_text, len, dropped_text);
- /* Allow panic_cpu to take over the consoles safely */
- if (abandon_console_lock_in_panic())
- break;
+ con->seq++;
+
+ if (handover) {
+ start_critical_timings();
+ *handover = console_lock_spinning_disable_and_check();
+ printk_safe_exit_irqrestore(flags);
+ }
+skip:
+ return true;
+}
- if (do_cond_resched)
- cond_resched();
+/*
+ * Print a record for a given console, but allow another printk() caller to
+ * take over the console_lock and continue printing.
+ *
+ * Requires the console_lock, but depending on @handover after the call, the
+ * caller may no longer have the console_lock.
+ *
+ * See __console_emit_next_record() for argument and return details.
+ */
+static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text,
+ char *dropped_text, bool *handover)
+{
+ /*
+ * Handovers are only supported if threaded printers are atomically
+ * blocked. The context taking over the console_lock may be atomic.
+ */
+ if (!console_kthreads_atomically_blocked()) {
+ *handover = false;
+ handover = NULL;
}
- /* Get consistent value of the next-to-be-used sequence number. */
- next_seq = console_seq;
+ return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
+}
- console_locked = 0;
- up_console_sem();
+/*
+ * Print out all remaining records to all consoles.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when this function returns true. It means that all
+ * usable consoles are completely flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * Returns true when there was at least one usable console and all messages
+ * were flushed to all usable consoles. A returned false informs the caller
+ * that everything was not flushed (either there were no usable consoles or
+ * another context has taken over printing or it is a panic situation and this
+ * is not the panic CPU or direct printing is not preferred). Regardless the
+ * reason, the caller should assume it is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+ */
+static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+{
+ static char dropped_text[DROPPED_TEXT_MAX];
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+ static char text[CONSOLE_LOG_MAX];
+ bool any_usable = false;
+ struct console *con;
+ bool any_progress;
+
+ *next_seq = 0;
+ *handover = false;
+
+ do {
+ /* Let the kthread printers do the work if they can. */
+ if (!allow_direct_printing())
+ return false;
+
+ any_progress = false;
+
+ for_each_console(con) {
+ bool progress;
+
+ if (!console_is_usable(con))
+ continue;
+ any_usable = true;
+
+ if (con->flags & CON_EXTENDED) {
+ /* Extended consoles do not print "dropped messages". */
+ progress = console_emit_next_record_transferable(con, &text[0],
+ &ext_text[0], NULL, handover);
+ } else {
+ progress = console_emit_next_record_transferable(con, &text[0],
+ NULL, &dropped_text[0], handover);
+ }
+ if (*handover)
+ return false;
+
+ /* Track the next of the highest seq flushed. */
+ if (con->seq > *next_seq)
+ *next_seq = con->seq;
+
+ if (!progress)
+ continue;
+ any_progress = true;
+
+ /* Allow panic_cpu to take over the consoles safely. */
+ if (abandon_console_lock_in_panic())
+ return false;
+
+ if (do_cond_resched)
+ cond_resched();
+ }
+ } while (any_progress);
+
+ return any_usable;
+}
+
+/**
+ * console_unlock - unlock the console system
+ *
+ * Releases the console_lock which the caller holds on the console system
+ * and the console driver list.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+ bool do_cond_resched;
+ bool handover;
+ bool flushed;
+ u64 next_seq;
+
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
/*
- * Someone could have filled up the buffer again, so re-check if there's
- * something to flush. In case we cannot trylock the console_sem again,
- * there's a new owner and the console_unlock() from them will do the
- * flush, no worries.
+ * Console drivers are called with interrupts disabled, so
+ * @console_may_schedule should be cleared before; however, we may
+ * end up dumping a lot of lines, for example, if called from
+ * console registration path, and should invoke cond_resched()
+ * between lines if allowable. Not doing so can cause a very long
+ * scheduling stall on a slow console leading to RCU stall and
+ * softlockup warnings which exacerbate the issue with more
+ * messages practically incapacitating the system. Therefore, create
+ * a local to use for the printing loop.
*/
- retry = prb_read_valid(prb, next_seq, NULL);
- if (retry && !abandon_console_lock_in_panic() && console_trylock())
- goto again;
+ do_cond_resched = console_may_schedule;
+
+ do {
+ console_may_schedule = 0;
+
+ flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
+ if (!handover)
+ __console_unlock();
+
+ /*
+ * Abort if there was a failure to flush all messages to all
+ * usable consoles. Either it is not possible to flush (in
+ * which case it would be an infinite loop of retrying) or
+ * another context has taken over printing.
+ */
+ if (!flushed)
+ break;
+
+ /*
+ * Some context may have added new records after
+ * console_flush_all() but before unlocking the console.
+ * Re-check if there is a new record to flush. If the trylock
+ * fails, another context is already handling the printing.
+ */
+ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
EXPORT_SYMBOL(console_unlock);
@@ -2832,15 +3120,21 @@ void console_unblank(void)
if (oops_in_progress) {
if (down_trylock_console_sem() != 0)
return;
+ if (!console_kthreads_atomic_tryblock()) {
+ up_console_sem();
+ return;
+ }
} else
console_lock();
- console_locked = 1;
console_may_schedule = 0;
for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
c->unblank();
console_unlock();
+
+ if (!oops_in_progress)
+ pr_flush(1000, true);
}
/**
@@ -2861,8 +3155,14 @@ void console_flush_on_panic(enum con_flush_mode mode)
console_trylock();
console_may_schedule = 0;
- if (mode == CONSOLE_REPLAY_ALL)
- console_seq = prb_first_valid_seq(prb);
+ if (mode == CONSOLE_REPLAY_ALL) {
+ struct console *c;
+ u64 seq;
+
+ seq = prb_first_valid_seq(prb);
+ for_each_console(c)
+ c->seq = seq;
+ }
console_unlock();
}
@@ -2893,6 +3193,7 @@ struct tty_driver *console_device(int *index)
*/
void console_stop(struct console *console)
{
+ __pr_flush(console, 1000, true);
console_lock();
console->flags &= ~CON_ENABLED;
console_unlock();
@@ -2904,6 +3205,7 @@ void console_start(struct console *console)
console_lock();
console->flags |= CON_ENABLED;
console_unlock();
+ __pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_start);
@@ -2990,6 +3292,11 @@ static void try_enable_default_console(struct console *newcon)
newcon->flags |= CON_CONSDEV;
}
+#define con_printk(lvl, con, fmt, ...) \
+ printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
+ (con->flags & CON_BOOT) ? "boot" : "", \
+ con->name, con->index, ##__VA_ARGS__)
+
/*
* The console driver calls this routine during kernel initialization
* to register the console printing procedure with printk() and to
@@ -3097,27 +3404,24 @@ void register_console(struct console *newcon)
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
- if (newcon->flags & CON_PRINTBUFFER) {
- /*
- * console_unlock(); will print out the buffered messages
- * for us.
- *
- * We're about to replay the log buffer. Only do this to the
- * just-registered console to avoid excessive message spam to
- * the already-registered consoles.
- *
- * Set exclusive_console with disabled interrupts to reduce
- * race window with eventual console_flush_on_panic() that
- * ignores console_lock.
- */
- exclusive_console = newcon;
- exclusive_console_stop_seq = console_seq;
+ newcon->dropped = 0;
+ newcon->thread = NULL;
+ newcon->blocked = true;
+ mutex_init(&newcon->lock);
+ if (newcon->flags & CON_PRINTBUFFER) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
- console_seq = syslog_seq;
+ newcon->seq = syslog_seq;
mutex_unlock(&syslog_lock);
+ } else {
+ /* Begin with next message. */
+ newcon->seq = prb_next_seq(prb);
}
+
+ if (printk_kthreads_available)
+ printk_start_kthread(newcon);
+
console_unlock();
console_sysfs_notify();
@@ -3128,9 +3432,7 @@ void register_console(struct console *newcon)
* users know there might be something in the kernel's log buffer that
* went to the bootconsole (that they do not see on the real console)
*/
- pr_info("%sconsole [%s%d] enabled\n",
- (newcon->flags & CON_BOOT) ? "boot" : "" ,
- newcon->name, newcon->index);
+ con_printk(KERN_INFO, newcon, "enabled\n");
if (bootcon_enabled &&
((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
!keep_bootcon) {
@@ -3146,12 +3448,11 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
+ struct task_struct *thd;
struct console *con;
int res;
- pr_info("%sconsole [%s%d] disabled\n",
- (console->flags & CON_BOOT) ? "boot" : "" ,
- console->name, console->index);
+ con_printk(KERN_INFO, console, "disabled\n");
res = _braille_unregister_console(console);
if (res < 0)
@@ -3188,7 +3489,20 @@ int unregister_console(struct console *console)
console_drivers->flags |= CON_CONSDEV;
console->flags &= ~CON_ENABLED;
+
+ /*
+ * console->thread can only be cleared under the console lock. But
+ * stopping the thread must be done without the console lock. The
+ * task that clears @thread is the task that stops the kthread.
+ */
+ thd = console->thread;
+ console->thread = NULL;
+
console_unlock();
+
+ if (thd)
+ kthread_stop(thd);
+
console_sysfs_notify();
if (console->exit)
@@ -3284,12 +3598,294 @@ static int __init printk_late_init(void)
}
late_initcall(printk_late_init);
+static int __init printk_activate_kthreads(void)
+{
+ struct console *con;
+
+ console_lock();
+ printk_kthreads_available = true;
+ for_each_console(con)
+ printk_start_kthread(con);
+ console_unlock();
+
+ return 0;
+}
+early_initcall(printk_activate_kthreads);
+
#if defined CONFIG_PRINTK
+/* If @con is specified, only wait for that console. Otherwise wait for all. */
+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
+{
+ int remaining = timeout_ms;
+ struct console *c;
+ u64 last_diff = 0;
+ u64 printk_seq;
+ u64 diff;
+ u64 seq;
+
+ might_sleep();
+
+ seq = prb_next_seq(prb);
+
+ for (;;) {
+ diff = 0;
+
+ console_lock();
+ for_each_console(c) {
+ if (con && con != c)
+ continue;
+ if (!console_is_usable(c))
+ continue;
+ printk_seq = c->seq;
+ if (printk_seq < seq)
+ diff += seq - printk_seq;
+ }
+ console_unlock();
+
+ if (diff != last_diff && reset_on_progress)
+ remaining = timeout_ms;
+
+ if (diff == 0 || remaining == 0)
+ break;
+
+ if (remaining < 0) {
+ /* no timeout limit */
+ msleep(100);
+ } else if (remaining < 100) {
+ msleep(remaining);
+ remaining = 0;
+ } else {
+ msleep(100);
+ remaining -= 100;
+ }
+
+ last_diff = diff;
+ }
+
+ return (diff == 0);
+}
+
+/**
+ * pr_flush() - Wait for printing threads to catch up.
+ *
+ * @timeout_ms: The maximum time (in ms) to wait.
+ * @reset_on_progress: Reset the timeout if forward progress is seen.
+ *
+ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
+ * represents infinite waiting.
+ *
+ * If @reset_on_progress is true, the timeout will be reset whenever any
+ * printer has been seen to make some forward progress.
+ *
+ * Context: Process context. May sleep while acquiring console lock.
+ * Return: true if all enabled printers are caught up.
+ */
+bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+ return __pr_flush(NULL, timeout_ms, reset_on_progress);
+}
+EXPORT_SYMBOL(pr_flush);
+
+static void __printk_fallback_preferred_direct(void)
+{
+ printk_prefer_direct_enter();
+ pr_err("falling back to preferred direct printing\n");
+ printk_kthreads_available = false;
+}
+
+/*
+ * Enter preferred direct printing, but never exit. Mark console threads as
+ * unavailable. The system is then forever in preferred direct printing and
+ * any printing threads will exit.
+ *
+ * Must *not* be called under console_lock. Use
+ * __printk_fallback_preferred_direct() if already holding console_lock.
+ */
+static void printk_fallback_preferred_direct(void)
+{
+ console_lock();
+ __printk_fallback_preferred_direct();
+ console_unlock();
+}
+
+/*
+ * Print a record for a given console, not allowing another printk() caller
+ * to take over. This is appropriate for contexts that do not have the
+ * console_lock.
+ *
+ * See __console_emit_next_record() for argument and return details.
+ */
+static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
+ char *dropped_text)
+{
+ return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
+}
+
+static bool printer_should_wake(struct console *con, u64 seq)
+{
+ short flags;
+
+ if (kthread_should_stop() || !printk_kthreads_available)
+ return true;
+
+ if (con->blocked ||
+ console_kthreads_atomically_blocked()) {
+ return false;
+ }
+
+ /*
+ * This is an unsafe read from con->flags, but a false positive is
+ * not a problem. Worst case it would allow the printer to wake up
+ * although it is disabled. But the printer will notice that when
+ * attempting to print and instead go back to sleep.
+ */
+ flags = data_race(READ_ONCE(con->flags));
+
+ if (!__console_is_usable(flags))
+ return false;
+
+ return prb_read_valid(prb, seq, NULL);
+}
+
+static int printk_kthread_func(void *data)
+{
+ struct console *con = data;
+ char *dropped_text = NULL;
+ char *ext_text = NULL;
+ u64 seq = 0;
+ char *text;
+ int error;
+
+ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
+ if (!text) {
+ con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
+ printk_fallback_preferred_direct();
+ goto out;
+ }
+
+ if (con->flags & CON_EXTENDED) {
+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
+ if (!ext_text) {
+ con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
+ printk_fallback_preferred_direct();
+ goto out;
+ }
+ } else {
+ dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
+ if (!dropped_text) {
+ con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
+ printk_fallback_preferred_direct();
+ goto out;
+ }
+ }
+
+ con_printk(KERN_INFO, con, "printing thread started\n");
+
+ for (;;) {
+ /*
+ * Guarantee this task is visible on the waitqueue before
+ * checking the wake condition.
+ *
+ * The full memory barrier within set_current_state() of
+ * prepare_to_wait_event() pairs with the full memory barrier
+ * within wq_has_sleeper().
+ *
+ * This pairs with __wake_up_klogd:A.
+ */
+ error = wait_event_interruptible(log_wait,
+ printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
+
+ if (kthread_should_stop() || !printk_kthreads_available)
+ break;
+
+ if (error)
+ continue;
+
+ error = mutex_lock_interruptible(&con->lock);
+ if (error)
+ continue;
+
+ if (con->blocked ||
+ !console_kthread_printing_tryenter()) {
+ /* Another context has locked the console_lock. */
+ mutex_unlock(&con->lock);
+ continue;
+ }
+
+ /*
+ * Although this context has not locked the console_lock, it
+ * is known that the console_lock is not locked and it is not
+ * possible for any other context to lock the console_lock.
+ * Therefore it is safe to read con->flags.
+ */
+
+ if (!__console_is_usable(con->flags)) {
+ console_kthread_printing_exit();
+ mutex_unlock(&con->lock);
+ continue;
+ }
+
+ /*
+ * Even though the printk kthread is always preemptible, it is
+ * still not allowed to call cond_resched() from within
+ * console drivers. The task may become non-preemptible in the
+ * console driver call chain. For example, vt_console_print()
+ * takes a spinlock and then can call into fbcon_redraw(),
+ * which can conditionally invoke cond_resched().
+ */
+ console_may_schedule = 0;
+ console_emit_next_record(con, text, ext_text, dropped_text);
+
+ seq = con->seq;
+
+ console_kthread_printing_exit();
+
+ mutex_unlock(&con->lock);
+ }
+
+ con_printk(KERN_INFO, con, "printing thread stopped\n");
+out:
+ kfree(dropped_text);
+ kfree(ext_text);
+ kfree(text);
+
+ console_lock();
+ /*
+ * If this kthread is being stopped by another task, con->thread will
+ * already be NULL. That is fine. The important thing is that it is
+ * NULL after the kthread exits.
+ */
+ con->thread = NULL;
+ console_unlock();
+
+ return 0;
+}
+
+/* Must be called under console_lock. */
+static void printk_start_kthread(struct console *con)
+{
+ /*
+ * Do not start a kthread if there is no write() callback. The
+ * kthreads assume the write() callback exists.
+ */
+ if (!con->write)
+ return;
+
+ con->thread = kthread_run(printk_kthread_func, con,
+ "pr/%s%d", con->name, con->index);
+ if (IS_ERR(con->thread)) {
+ con->thread = NULL;
+ con_printk(KERN_ERR, con, "unable to start printing thread\n");
+ __printk_fallback_preferred_direct();
+ return;
+ }
+}
+
/*
* Delayed printk version, for scheduler-internal messages:
*/
-#define PRINTK_PENDING_WAKEUP 0x01
-#define PRINTK_PENDING_OUTPUT 0x02
+#define PRINTK_PENDING_WAKEUP 0x01
+#define PRINTK_PENDING_DIRECT_OUTPUT 0x02
static DEFINE_PER_CPU(int, printk_pending);
@@ -3297,41 +3893,71 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
int pending = this_cpu_xchg(printk_pending, 0);
- if (pending & PRINTK_PENDING_OUTPUT) {
+ if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
+ printk_prefer_direct_enter();
+
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
+
+ printk_prefer_direct_exit();
}
if (pending & PRINTK_PENDING_WAKEUP)
- wake_up_interruptible(&log_wait);
+ wake_up_interruptible_all(&log_wait);
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
-void wake_up_klogd(void)
+static void __wake_up_klogd(int val)
{
if (!printk_percpu_data_ready())
return;
preempt_disable();
- if (waitqueue_active(&log_wait)) {
- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+ /*
+ * Guarantee any new records can be seen by tasks preparing to wait
+ * before this context checks if the wait queue is empty.
+ *
+ * The full memory barrier within wq_has_sleeper() pairs with the full
+ * memory barrier within set_current_state() of
+ * prepare_to_wait_event(), which is called after ___wait_event() adds
+ * the waiter but before it has checked the wait condition.
+ *
+ * This pairs with devkmsg_read:A, syslog_print:A, and
+ * printk_kthread_func:A.
+ */
+ if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
+ (val & PRINTK_PENDING_DIRECT_OUTPUT)) {
+ this_cpu_or(printk_pending, val);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
preempt_enable();
}
+void wake_up_klogd(void)
+{
+ __wake_up_klogd(PRINTK_PENDING_WAKEUP);
+}
+
void defer_console_output(void)
{
- if (!printk_percpu_data_ready())
- return;
+ /*
+ * New messages may have been added directly to the ringbuffer
+ * using vprintk_store(), so wake any waiters as well.
+ */
+ int val = PRINTK_PENDING_WAKEUP;
- preempt_disable();
- this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
- preempt_enable();
+ /*
+ * Make sure that some context will print the messages when direct
+ * printing is allowed. Direct printing is allowed in situations where
+ * the kthreads may be unreliable or unusable.
+ */
+ if (allow_direct_printing())
+ val |= PRINTK_PENDING_DIRECT_OUTPUT;
+
+ __wake_up_klogd(val);
}
void printk_trigger_flush(void)
@@ -3667,26 +4293,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
#ifdef CONFIG_SMP
-static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
-static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
/**
- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
- * spinning lock is not owned by any CPU.
+ * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
+ * spinning lock is not owned by any CPU.
*
* Context: Any context.
*/
-void __printk_wait_on_cpu_lock(void)
+void __printk_cpu_sync_wait(void)
{
do {
cpu_relax();
- } while (atomic_read(&printk_cpulock_owner) != -1);
+ } while (atomic_read(&printk_cpu_sync_owner) != -1);
}
-EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+EXPORT_SYMBOL(__printk_cpu_sync_wait);
/**
- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
- * spinning lock.
+ * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
+ * spinning lock.
*
* If no processor has the lock, the calling processor takes the lock and
* becomes the owner. If the calling processor is already the owner of the
@@ -3695,7 +4321,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
* Context: Any context. Expects interrupts to be disabled.
* Return: 1 on success, otherwise 0.
*/
-int __printk_cpu_trylock(void)
+int __printk_cpu_sync_try_get(void)
{
int cpu;
int old;
@@ -3705,79 +4331,80 @@ int __printk_cpu_trylock(void)
/*
* Guarantee loads and stores from this CPU when it is the lock owner
* are _not_ visible to the previous lock owner. This pairs with
- * __printk_cpu_unlock:B.
+ * __printk_cpu_sync_put:B.
*
* Memory barrier involvement:
*
- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
- * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+ * then __printk_cpu_sync_put:A can never read from
+ * __printk_cpu_sync_try_get:B.
*
* Relies on:
*
- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
* of the previous CPU
* matching
- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
- * of this CPU
+ * ACQUIRE from __printk_cpu_sync_try_get:A to
+ * __printk_cpu_sync_try_get:B of this CPU
*/
- old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
- cpu); /* LMM(__printk_cpu_trylock:A) */
+ old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
+ cpu); /* LMM(__printk_cpu_sync_try_get:A) */
if (old == -1) {
/*
* This CPU is now the owner and begins loading/storing
- * data: LMM(__printk_cpu_trylock:B)
+ * data: LMM(__printk_cpu_sync_try_get:B)
*/
return 1;
} else if (old == cpu) {
/* This CPU is already the owner. */
- atomic_inc(&printk_cpulock_nested);
+ atomic_inc(&printk_cpu_sync_nested);
return 1;
}
return 0;
}
-EXPORT_SYMBOL(__printk_cpu_trylock);
+EXPORT_SYMBOL(__printk_cpu_sync_try_get);
/**
- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
*
* The calling processor must be the owner of the lock.
*
* Context: Any context. Expects interrupts to be disabled.
*/
-void __printk_cpu_unlock(void)
+void __printk_cpu_sync_put(void)
{
- if (atomic_read(&printk_cpulock_nested)) {
- atomic_dec(&printk_cpulock_nested);
+ if (atomic_read(&printk_cpu_sync_nested)) {
+ atomic_dec(&printk_cpu_sync_nested);
return;
}
/*
* This CPU is finished loading/storing data:
- * LMM(__printk_cpu_unlock:A)
+ * LMM(__printk_cpu_sync_put:A)
*/
/*
* Guarantee loads and stores from this CPU when it was the
* lock owner are visible to the next lock owner. This pairs
- * with __printk_cpu_trylock:A.
+ * with __printk_cpu_sync_try_get:A.
*
* Memory barrier involvement:
*
- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
- * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+ * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
*
* Relies on:
*
- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
* of this CPU
* matching
- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
- * of the next CPU
+ * ACQUIRE from __printk_cpu_sync_try_get:A to
+ * __printk_cpu_sync_try_get:B of the next CPU
*/
- atomic_set_release(&printk_cpulock_owner,
- -1); /* LMM(__printk_cpu_unlock:B) */
+ atomic_set_release(&printk_cpu_sync_owner,
+ -1); /* LMM(__printk_cpu_sync_put:B) */
}
-EXPORT_SYMBOL(__printk_cpu_unlock);
+EXPORT_SYMBOL(__printk_cpu_sync_put);
#endif /* CONFIG_SMP */
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index a001e1e7a992..4995c078cff9 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -647,6 +647,7 @@ static void print_cpu_stall(unsigned long gps)
* See Documentation/RCU/stallwarn.rst for info on how to debug
* RCU CPU stall warnings.
*/
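+	/* The stall may also be starving the printer kthreads, so print this report directly. */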
+ printk_prefer_direct_enter();
trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -684,6 +685,7 @@ static void print_cpu_stall(unsigned long gps)
*/
set_tsk_need_resched(current);
set_preempt_need_resched();
+ printk_prefer_direct_exit();
}
static void check_cpu_stall(struct rcu_data *rdp)
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 6bcc5d6a6572..4177645e74d6 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -447,9 +447,11 @@ static int __orderly_reboot(void)
ret = run_cmd(reboot_cmd);
if (ret) {
+ printk_prefer_direct_enter();
pr_warn("Failed to start orderly reboot: forcing the issue\n");
emergency_sync();
kernel_restart(NULL);
+ printk_prefer_direct_exit();
}
return ret;
@@ -462,6 +464,7 @@ static int __orderly_poweroff(bool force)
ret = run_cmd(poweroff_cmd);
if (ret && force) {
+ printk_prefer_direct_enter();
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
/*
@@ -471,6 +474,7 @@ static int __orderly_poweroff(bool force)
*/
emergency_sync();
kernel_power_off();
+ printk_prefer_direct_exit();
}
return ret;
@@ -528,6 +532,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
*/
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
+ printk_prefer_direct_enter();
+
/*
* We have reached here after the emergency shutdown waiting period has
* expired. This means orderly_poweroff has not been able to shut off
@@ -544,6 +550,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
*/
pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
emergency_restart();
+
+ printk_prefer_direct_exit();
}
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -582,11 +590,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
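+	/* The system is about to go down; do not leave these messages to the printer kthreads. */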
+ printk_prefer_direct_enter();
+
pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
/* Shutdown should be initiated only once. */
if (!atomic_dec_and_test(&allow_proceed))
- return;
+ goto out;
/*
* Queue a backup emergency shutdown in the event of
@@ -594,6 +604,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
*/
hw_failure_emergency_poweroff(ms_until_forced);
orderly_poweroff(true);
+out:
+ printk_prefer_direct_exit();
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9166220457bc..40024e03d422 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -424,6 +424,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* Start period for the next softlockup warning. */
update_report_ts();
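+	/* A stuck CPU may be starving the printer kthreads; emit the lockup report directly. */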
+ printk_prefer_direct_enter();
+
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
@@ -442,6 +444,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (softlockup_panic)
panic("softlockup: hung tasks");
+
+ printk_prefer_direct_exit();
}
return HRTIMER_RESTART;
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582c..701f35f0e2d4 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
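+	/* During a hard lockup the printer kthreads may never run; print the report directly. */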
+ printk_prefer_direct_enter();
+
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
this_cpu);
print_modules();
@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
+ printk_prefer_direct_exit();
+
__this_cpu_write(hard_watchdog_warn, true);
return;
}
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6b7f1bf6715d..83471e81501a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
__dump_stack(log_lvl);
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 199ab201d501..d01aec6ae15c 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
* Allow nested NMI backtraces while serializing
* against other CPUs.
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}