Diffstat (limited to 'kernel/printk')
-rw-r--r--  kernel/printk/.kunitconfig                      3
-rw-r--r--  kernel/printk/Makefile                          2
-rw-r--r--  kernel/printk/internal.h                        1
-rw-r--r--  kernel/printk/nbcon.c                          14
-rw-r--r--  kernel/printk/printk.c                         37
-rw-r--r--  kernel/printk/printk_ringbuffer.c              50
-rw-r--r--  kernel/printk/printk_ringbuffer_kunit_test.c  327
7 files changed, 379 insertions(+), 55 deletions(-)
diff --git a/kernel/printk/.kunitconfig b/kernel/printk/.kunitconfig
new file mode 100644
index 000000000000..f31458fd1a92
--- /dev/null
+++ b/kernel/printk/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_PRINTK=y
+CONFIG_PRINTK_RINGBUFFER_KUNIT_TEST=y
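
A note on usage: this .kunitconfig fragment lets the ringbuffer test be run standalone through the KUnit tooling, for example ./tools/testing/kunit/kunit.py run --kunitconfig=kernel/printk/.kunitconfig. The --kunitconfig option is the standard kunit.py way to build and run against such a fragment; the exact invocation may vary with the tree.
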
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index 39a2b61c7232..f8004ac3983d 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_PRINTK_INDEX) += index.o
obj-$(CONFIG_PRINTK) += printk_support.o
printk_support-y := printk_ringbuffer.o
printk_support-$(CONFIG_SYSCTL) += sysctl.o
+
+obj-$(CONFIG_PRINTK_RINGBUFFER_KUNIT_TEST) += printk_ringbuffer_kunit_test.o
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index ef282001f200..f72bbfa266d6 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -332,7 +332,6 @@ struct printk_message {
unsigned long dropped;
};
-bool other_cpu_in_panic(void);
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
bool is_extended, bool may_supress);
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 646801813415..558ef3177976 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -12,6 +12,7 @@
#include <linux/irqflags.h>
#include <linux/kthread.h>
#include <linux/minmax.h>
+#include <linux/panic.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/slab.h>
@@ -254,7 +255,7 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
* opportunity to perform any necessary cleanup if they were
* interrupted by the panic CPU while printing.
*/
- if (other_cpu_in_panic() &&
+ if (panic_on_other_cpu() &&
(!is_reacquire || cur->unsafe_takeover)) {
return -EPERM;
}
@@ -309,7 +310,7 @@ static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
* Event #2 implies the new context is PANIC.
* Event #3 occurs when panic() has flushed the console.
* Event #4 occurs when a non-panic CPU reacquires.
- * Event #5 is not possible due to the other_cpu_in_panic() check
+ * Event #5 is not possible due to the panic_on_other_cpu() check
* in nbcon_context_try_acquire_handover().
*/
@@ -348,7 +349,7 @@ static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
struct nbcon_state new;
/* Note that the caller must still remove the request! */
- if (other_cpu_in_panic())
+ if (panic_on_other_cpu())
return -EPERM;
/*
@@ -446,7 +447,7 @@ static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
* nbcon_waiter_matches(). In particular, the assumption that
* lower priorities are ignored during panic.
*/
- if (other_cpu_in_panic())
+ if (panic_on_other_cpu())
return -EPERM;
/* Handover is not possible on the same CPU. */
@@ -589,7 +590,6 @@ static struct printk_buffers panic_nbcon_pbufs;
*/
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
{
- unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state cur;
int err;
@@ -614,7 +614,7 @@ out:
/* Acquire succeeded. */
/* Assign the appropriate buffer for this context. */
- if (atomic_read(&panic_cpu) == cpu)
+ if (panic_on_this_cpu())
ctxt->pbufs = &panic_nbcon_pbufs;
else
ctxt->pbufs = con->pbufs;
@@ -1394,7 +1394,7 @@ enum nbcon_prio nbcon_get_default_prio(void)
{
unsigned int *cpu_emergency_nesting;
- if (this_cpu_in_panic())
+ if (panic_on_this_cpu())
return NBCON_PRIO_PANIC;
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 0efbcdda9aab..5aee9ffb16b9 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -48,6 +48,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
+#include <linux/panic.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
@@ -345,34 +346,6 @@ static void __up_console_sem(unsigned long ip)
}
#define up_console_sem() __up_console_sem(_RET_IP_)
-static bool panic_in_progress(void)
-{
- return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
-}
-
-/* Return true if a panic is in progress on the current CPU. */
-bool this_cpu_in_panic(void)
-{
- /*
- * We can use raw_smp_processor_id() here because it is impossible for
- * the task to be migrated to the panic_cpu, or away from it. If
- * panic_cpu has already been set, and we're not currently executing on
- * that CPU, then we never will be.
- */
- return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
-}
-
-/*
- * Return true if a panic is in progress on a remote CPU.
- *
- * On true, the local CPU should immediately release any printing resources
- * that may be needed by the panic CPU.
- */
-bool other_cpu_in_panic(void)
-{
- return (panic_in_progress() && !this_cpu_in_panic());
-}
-
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
@@ -2407,7 +2380,7 @@ asmlinkage int vprintk_emit(int facility, int level,
* non-panic CPUs are generating any messages, they will be
* silently dropped.
*/
- if (other_cpu_in_panic() &&
+ if (panic_on_other_cpu() &&
!debug_non_panic_cpus &&
!panic_triggering_all_cpu_backtrace)
return 0;
@@ -2843,7 +2816,7 @@ void console_lock(void)
might_sleep();
/* On panic, the console_lock must be left to the panic cpu. */
- while (other_cpu_in_panic())
+ while (panic_on_other_cpu())
msleep(1000);
down_console_sem();
@@ -2863,7 +2836,7 @@ EXPORT_SYMBOL(console_lock);
int console_trylock(void)
{
/* On panic, the console_lock must be left to the panic cpu. */
- if (other_cpu_in_panic())
+ if (panic_on_other_cpu())
return 0;
if (down_trylock_console_sem())
return 0;
@@ -3243,7 +3216,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
any_progress = true;
/* Allow panic_cpu to take over the consoles safely. */
- if (other_cpu_in_panic())
+ if (panic_on_other_cpu())
goto abandon;
if (do_cond_resched)
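
The three helpers deleted from printk.c above are replaced by panic_on_this_cpu() and panic_on_other_cpu(), picked up via the new #include <linux/panic.h> in printk.c and nbcon.c. The header itself is outside this diffstat (it is limited to kernel/printk), so as a reader aid, here is a sketch of what the relocated helpers presumably look like, assuming the move keeps the semantics of the deleted code verbatim:

/*
 * Sketch only -- reconstructed from the helpers removed above. The
 * authoritative definitions live in <linux/panic.h>, which this diff
 * does not show.
 */
static inline bool panic_in_progress(void)
{
	/* A panic is in progress once some CPU has claimed panic_cpu. */
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}

/* Return true if a panic is in progress on the current CPU. */
static inline bool panic_on_this_cpu(void)
{
	/*
	 * raw_smp_processor_id() is safe here: a task can neither be
	 * migrated to the panic_cpu nor away from it once panic_cpu
	 * is set.
	 */
	return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
}

/* Return true if a panic is in progress on a remote CPU. */
static inline bool panic_on_other_cpu(void)
{
	return panic_in_progress() && !panic_on_this_cpu();
}
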
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index d9fb053cff67..40198bffb7d0 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/visibility.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
@@ -393,25 +394,21 @@ static unsigned int to_blk_size(unsigned int size)
* Sanity checker for reserve size. The ringbuffer code assumes that a data
* block does not exceed the maximum possible size that could fit within the
* ringbuffer. This function provides that basic size check so that the
- * assumption is safe.
+ * assumption is safe. In particular, it guarantees that data_push_tail() will
+ * never attempt to push the tail beyond the head.
*/
static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
{
- struct prb_data_block *db = NULL;
-
+ /* Data-less blocks take no space. */
if (size == 0)
return true;
/*
- * Ensure the alignment padded size could possibly fit in the data
- * array. The largest possible data block must still leave room for
- * at least the ID of the next block.
+ * If data blocks were allowed to be larger than half the data ring
+ * size, a wrapping data block could require more space than the full
+ * ringbuffer.
*/
- size = to_blk_size(size);
- if (size > DATA_SIZE(data_ring) - sizeof(db->id))
- return false;
-
- return true;
+ return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
}
/* Query the state of a descriptor. */
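
The half-ring bound in the rewritten data_check_size() works because a wrapping reservation pays twice: the unusable tail of the ring plus the full block placed at the ring start. A worked example, assuming a hypothetical DATA_SIZE(data_ring) of 4096 bytes (offsets kept 8-byte aligned, as to_blk_size() guarantees):

/*
 * Illustrative numbers only (DATA_SIZE assumed to be 4096).
 *
 * A padded block of 2048 bytes (exactly half) reserved at offset 2056
 * cannot fit before the ring end, so get_next_lpos() skips the
 * 2040-byte tail and places the block at the ring start:
 *
 *     span = (4096 - 2056) + 2048 = 4088  <=  DATA_SIZE
 *
 * A padded block of 2056 bytes (just over half) reserved at offset
 * 2048 would span (4096 - 2048) + 2056 = 4104 bytes, more than
 * DATA_SIZE, which would let data_push_tail() push the tail past the
 * head. Capping blocks at DATA_SIZE / 2 rules this out; the
 * WARN_ON_ONCE() checks added to data_alloc()/data_realloc() below
 * double-check the same invariant.
 */
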
@@ -1051,8 +1048,17 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
do {
next_lpos = get_next_lpos(data_ring, begin_lpos, size);
- if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
- /* Failed to allocate, specify a data-less block. */
+ /*
+ * data_check_size() prevents data block allocation that could
+ * cause illegal ringbuffer states. But double check that the
+ * used space will not be bigger than the ring buffer. Wrapped
+ * messages need to reserve more space, see get_next_lpos().
+ *
+ * Specify a data-less block when the check or the allocation
+ * fails.
+ */
+ if (WARN_ON_ONCE(next_lpos - begin_lpos > DATA_SIZE(data_ring)) ||
+ !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
blk_lpos->begin = FAILED_LPOS;
blk_lpos->next = FAILED_LPOS;
return NULL;
@@ -1140,8 +1146,18 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
return &blk->data[0];
}
- if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
+ /*
+ * data_check_size() prevents data block reallocation that could
+ * cause illegal ringbuffer states. But double check that the
+ * new used space will not be bigger than the ring buffer. Wrapped
+ * messages need to reserve more space, see get_next_lpos().
+ *
+ * Specify failure when the check or the allocation fails.
+ */
+ if (WARN_ON_ONCE(next_lpos - blk_lpos->begin > DATA_SIZE(data_ring)) ||
+ !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
return NULL;
+ }
/* The memory barrier involvement is the same as data_alloc:A. */
if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
@@ -1685,6 +1701,7 @@ fail:
memset(r, 0, sizeof(*r));
return false;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_reserve);
/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
@@ -1759,6 +1776,7 @@ void prb_commit(struct prb_reserved_entry *e)
if (head_id != e->id)
desc_make_final(e->rb, e->id);
}
+EXPORT_SYMBOL_IF_KUNIT(prb_commit);
/**
* prb_final_commit() - Commit and finalize (previously reserved) data to
@@ -2143,7 +2161,7 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
* But it would have the sequence number returned
* by "prb_next_reserve_seq() - 1".
*/
- if (this_cpu_in_panic() &&
+ if (panic_on_this_cpu() &&
(!debug_non_panic_cpus || legacy_allow_panic_sync) &&
((*seq + 1) < prb_next_reserve_seq(rb))) {
(*seq)++;
@@ -2184,6 +2202,7 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
{
return _prb_read_valid(rb, &seq, r, NULL);
}
+EXPORT_SYMBOL_IF_KUNIT(prb_read_valid);
/**
* prb_read_valid_info() - Non-blocking read of meta data for a requested
@@ -2333,6 +2352,7 @@ void prb_init(struct printk_ringbuffer *rb,
infos[0].seq = -(u64)_DESCS_COUNT(descbits);
infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_init);
/**
* prb_record_text_space() - Query the full actual used ringbuffer space for
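
The EXPORT_SYMBOL_IF_KUNIT() additions follow the usual KUnit visibility pattern: with CONFIG_KUNIT enabled, the macro exports the symbol into the EXPORTED_FOR_KUNIT_TESTING namespace (otherwise it expands to nothing), and the test module imports that namespace. A minimal, generic sketch of the pattern (not part of this diff):

/* --- code under test --- */
#include <kunit/visibility.h>

int helper_under_test(void)
{
	return 42;
}
/* Exported for the test only; a no-op without CONFIG_KUNIT. */
EXPORT_SYMBOL_IF_KUNIT(helper_under_test);

/* --- the KUnit test module --- */
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
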
diff --git a/kernel/printk/printk_ringbuffer_kunit_test.c b/kernel/printk/printk_ringbuffer_kunit_test.c
new file mode 100644
index 000000000000..2282348e869a
--- /dev/null
+++ b/kernel/printk/printk_ringbuffer_kunit_test.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/cpuhplock.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+
+#include "printk_ringbuffer.h"
+
+/*
+ * This KUnit test checks the data integrity of the lockless printk_ringbuffer.
+ * From multiple CPUs it writes messages of varying length and content while
+ * a reader validates the correctness of the messages.
+ *
+ * IMPORTANT: The more CPUs you can use for this test, the better!
+ *
+ * The test works by starting "num_online_cpus() - 1" writer threads, each
+ * pinned to their own CPU. Each writer thread loops, writing data of varying
+ * length into a printk_ringbuffer as fast as possible. The data content is
+ * an embedded data struct followed by string content repeating the byte:
+ *
+ * 'A' + CPUID
+ *
+ * The reader runs on the remaining online CPU, or, if there is only one
+ * CPU, on the same CPU as the writer. It ensures that the embedded struct
+ * content is consistent with the string, and that the string is terminated
+ * and composed of the same repeating byte as its first byte.
+ *
+ * Because the threads are running in such tight loops, they will call
+ * cond_resched() from time to time so the system stays functional.
+ *
+ * If the reader encounters an error, the test is aborted and some
+ * information about the error is reported. The runtime of the test can be
+ * configured with the runtime_ms module parameter.
+ *
+ * Note that the test is performed on a separate printk_ringbuffer instance
+ * and not the instance used by printk().
+ */
+
+static unsigned long runtime_ms = 10 * MSEC_PER_SEC;
+module_param(runtime_ms, ulong, 0400);
+
+/* test data structure */
+struct prbtest_rbdata {
+ unsigned int size;
+ char text[] __counted_by(size);
+};
+
+#define MAX_RBDATA_TEXT_SIZE 0x80
+#define MAX_PRB_RECORD_SIZE (sizeof(struct prbtest_rbdata) + MAX_RBDATA_TEXT_SIZE)
+
+struct prbtest_data {
+ struct kunit *test;
+ struct printk_ringbuffer *ringbuffer;
+ /* used by writers to signal reader of new records */
+ wait_queue_head_t new_record_wait;
+};
+
+struct prbtest_thread_data {
+ unsigned long num;
+ struct prbtest_data *test_data;
+};
+
+static void prbtest_fail_record(struct kunit *test, const struct prbtest_rbdata *dat, u64 seq)
+{
+ unsigned int len;
+
+ len = dat->size - 1;
+
+ KUNIT_FAIL(test, "BAD RECORD: seq=%llu size=%u text=%.*s\n",
+ seq, dat->size,
+ len < MAX_RBDATA_TEXT_SIZE ? len : -1,
+ len < MAX_RBDATA_TEXT_SIZE ? dat->text : "<invalid>");
+}
+
+static bool prbtest_check_data(const struct prbtest_rbdata *dat)
+{
+ unsigned int len;
+
+ /* Sane size? At least one character + trailing '\0' */
+ if (dat->size < 2 || dat->size > MAX_RBDATA_TEXT_SIZE)
+ return false;
+
+ len = dat->size - 1;
+ if (dat->text[len] != '\0')
+ return false;
+
+ /* String repeats with the same character? */
+ while (len--) {
+ if (dat->text[len] != dat->text[0])
+ return false;
+ }
+
+ return true;
+}
+
+static int prbtest_writer(void *data)
+{
+ struct prbtest_thread_data *tr = data;
+ char text_id = 'A' + tr->num;
+ struct prb_reserved_entry e;
+ struct prbtest_rbdata *dat;
+ u32 record_size, text_size;
+ unsigned long count = 0;
+ struct printk_record r;
+
+ kunit_info(tr->test_data->test, "start thread %03lu (writer)\n", tr->num);
+
+ for (;;) {
+ /* ensure at least 1 character + trailing '\0' */
+ text_size = get_random_u32_inclusive(2, MAX_RBDATA_TEXT_SIZE);
+ if (WARN_ON_ONCE(text_size < 2))
+ text_size = 2;
+ if (WARN_ON_ONCE(text_size > MAX_RBDATA_TEXT_SIZE))
+ text_size = MAX_RBDATA_TEXT_SIZE;
+
+ record_size = sizeof(struct prbtest_rbdata) + text_size;
+ WARN_ON_ONCE(record_size > MAX_PRB_RECORD_SIZE);
+
+ /* specify the text sizes for reservation */
+ prb_rec_init_wr(&r, record_size);
+
+ /*
+ * Reservation can fail if:
+ *
+ * - No free descriptor is available.
+ * - The buffer is full, and the oldest record is reserved
+ * but not yet committed.
+ *
+ * Such failures actually happen in this test because all CPUs are
+ * trying to write an unbounded number of messages in a tight loop.
+ * These failures are intentionally ignored because this test
+ * focuses on races, ringbuffer consistency, and pushing system
+ * usability limits.
+ */
+ if (prb_reserve(&e, tr->test_data->ringbuffer, &r)) {
+ r.info->text_len = record_size;
+
+ dat = (struct prbtest_rbdata *)r.text_buf;
+ dat->size = text_size;
+ memset(dat->text, text_id, text_size - 1);
+ dat->text[text_size - 1] = '\0';
+
+ prb_commit(&e);
+
+ wake_up_interruptible(&tr->test_data->new_record_wait);
+ }
+
+ if ((count++ & 0x3fff) == 0)
+ cond_resched();
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ kunit_info(tr->test_data->test, "end thread %03lu: wrote=%lu\n", tr->num, count);
+
+ return 0;
+}
+
+struct prbtest_wakeup_timer {
+ struct timer_list timer;
+ struct task_struct *task;
+};
+
+static void prbtest_wakeup_callback(struct timer_list *timer)
+{
+ struct prbtest_wakeup_timer *wakeup = timer_container_of(wakeup, timer, timer);
+
+ set_tsk_thread_flag(wakeup->task, TIF_NOTIFY_SIGNAL);
+ wake_up_process(wakeup->task);
+}
+
+static int prbtest_reader(struct prbtest_data *test_data, unsigned long timeout_ms)
+{
+ struct prbtest_wakeup_timer wakeup;
+ char text_buf[MAX_PRB_RECORD_SIZE];
+ unsigned long count = 0;
+ struct printk_info info;
+ struct printk_record r;
+ u64 seq = 0;
+
+ wakeup.task = current;
+ timer_setup_on_stack(&wakeup.timer, prbtest_wakeup_callback, 0);
+ mod_timer(&wakeup.timer, jiffies + msecs_to_jiffies(timeout_ms));
+
+ prb_rec_init_rd(&r, &info, text_buf, sizeof(text_buf));
+
+ kunit_info(test_data->test, "start reader\n");
+
+ while (!wait_event_interruptible(test_data->new_record_wait,
+ prb_read_valid(test_data->ringbuffer, seq, &r))) {
+ /* check/track the sequence */
+ if (info.seq < seq)
+ KUNIT_FAIL(test_data->test, "BAD SEQ READ: request=%llu read=%llu\n",
+ seq, info.seq);
+
+ if (!prbtest_check_data((struct prbtest_rbdata *)r.text_buf))
+ prbtest_fail_record(test_data->test,
+ (struct prbtest_rbdata *)r.text_buf, info.seq);
+
+ if ((count++ & 0x3fff) == 0)
+ cond_resched();
+
+ seq = info.seq + 1;
+ }
+
+ timer_delete_sync(&wakeup.timer);
+ timer_destroy_on_stack(&wakeup.timer);
+
+ kunit_info(test_data->test, "end reader: read=%lu seq=%llu\n", count, info.seq);
+
+ return 0;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(prbtest_cpumask_cleanup, free_cpumask_var, struct cpumask *);
+KUNIT_DEFINE_ACTION_WRAPPER(prbtest_kthread_cleanup, kthread_stop, struct task_struct *);
+
+static void prbtest_add_cpumask_cleanup(struct kunit *test, cpumask_var_t mask)
+{
+ int err;
+
+ err = kunit_add_action_or_reset(test, prbtest_cpumask_cleanup, mask);
+ KUNIT_ASSERT_EQ(test, err, 0);
+}
+
+static void prbtest_add_kthread_cleanup(struct kunit *test, struct task_struct *kthread)
+{
+ int err;
+
+ err = kunit_add_action_or_reset(test, prbtest_kthread_cleanup, kthread);
+ KUNIT_ASSERT_EQ(test, err, 0);
+}
+
+static inline void prbtest_prb_reinit(struct printk_ringbuffer *rb)
+{
+ prb_init(rb, rb->text_data_ring.data, rb->text_data_ring.size_bits, rb->desc_ring.descs,
+ rb->desc_ring.count_bits, rb->desc_ring.infos);
+}
+
+static void test_readerwriter(struct kunit *test)
+{
+ /* Equivalent to CONFIG_LOG_BUF_SHIFT=13 */
+ DEFINE_PRINTKRB(test_rb, 8, 5);
+
+ struct prbtest_thread_data *thread_data;
+ struct prbtest_data *test_data;
+ struct task_struct *thread;
+ cpumask_var_t test_cpus;
+ int cpu, reader_cpu;
+
+ KUNIT_ASSERT_TRUE(test, alloc_cpumask_var(&test_cpus, GFP_KERNEL));
+ prbtest_add_cpumask_cleanup(test, test_cpus);
+
+ cpus_read_lock();
+ /*
+ * Failure of KUNIT_ASSERT() kills the current task
+ * so it can not be called while the CPU hotplug lock is held.
+ * Instead use a snapshot of the online CPUs.
+ * If they change during test execution it is unfortunate but not a grave error.
+ */
+ cpumask_copy(test_cpus, cpu_online_mask);
+ cpus_read_unlock();
+
+ /* One CPU is for the reader, all others are writers */
+ reader_cpu = cpumask_first(test_cpus);
+ if (cpumask_weight(test_cpus) == 1)
+ kunit_warn(test, "more than one CPU is recommended");
+ else
+ cpumask_clear_cpu(reader_cpu, test_cpus);
+
+ /* The KUnit test can be restarted multiple times. */
+ prbtest_prb_reinit(&test_rb);
+
+ test_data = kunit_kmalloc(test, sizeof(*test_data), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, test_data);
+ test_data->test = test;
+ test_data->ringbuffer = &test_rb;
+ init_waitqueue_head(&test_data->new_record_wait);
+
+ kunit_info(test, "running for %lu ms\n", runtime_ms);
+
+ for_each_cpu(cpu, test_cpus) {
+ thread_data = kunit_kmalloc(test, sizeof(*thread_data), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, thread_data);
+ thread_data->test_data = test_data;
+ thread_data->num = cpu;
+
+ thread = kthread_run_on_cpu(prbtest_writer, thread_data, cpu,
+ "prbtest writer %u");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, thread);
+ prbtest_add_kthread_cleanup(test, thread);
+ }
+
+ kunit_info(test, "starting test\n");
+
+ set_cpus_allowed_ptr(current, cpumask_of(reader_cpu));
+ prbtest_reader(test_data, runtime_ms);
+
+ kunit_info(test, "completed test\n");
+}
+
+static struct kunit_case prb_test_cases[] = {
+ KUNIT_CASE_SLOW(test_readerwriter),
+ {}
+};
+
+static struct kunit_suite prb_test_suite = {
+ .name = "printk-ringbuffer",
+ .test_cases = prb_test_cases,
+};
+kunit_test_suite(prb_test_suite);
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>");
+MODULE_DESCRIPTION("printk_ringbuffer KUnit test");
+MODULE_LICENSE("GPL");
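
Usage note: runtime_ms is a read-only (0400) module parameter, so it can only be set when the test is loaded. If the test is built as a module, a longer run could be requested with something like modprobe printk_ringbuffer_kunit_test runtime_ms=60000 (hypothetical invocation shown for illustration).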