author		Ingo Molnar <mingo@kernel.org>	2020-04-13 09:44:39 +0200
committer	Ingo Molnar <mingo@kernel.org>	2020-04-13 09:44:39 +0200
commit		3b02a051d25d9600e9d403ad3043aed7de00160e (patch)
tree		5b8f58b79328c04654bf5ab6286401057edeca8f /lib
parent		f5d2313bd3c540be405c4977a63840cd6d0167b5 (diff)
parent		8f3d9f354286745c751374f5f1fcafee6b3f3136 (diff)
Merge tag 'v5.7-rc1' into locking/kcsan, to resolve conflicts and refresh

Resolve these conflicts:

	arch/x86/Kconfig
	arch/x86/kernel/Makefile

Do a minor "evil merge" to move the KCSAN entry up a bit by a few lines
in the Kconfig to reduce the probability of future conflicts.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 4
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Kconfig.debug | 73
-rw-r--r--  lib/Kconfig.ubsan | 49
-rw-r--r--  lib/Makefile | 9
-rw-r--r--  lib/bch.c | 2
-rw-r--r--  lib/bootconfig.c | 35
-rw-r--r--  lib/cpumask.c | 29
-rw-r--r--  lib/crypto/chacha20poly1305-selftest.c | 11
-rw-r--r--  lib/dynamic_debug.c | 30
-rw-r--r--  lib/kunit/Kconfig | 8
-rw-r--r--  lib/kunit/Makefile | 4
-rw-r--r--  lib/kunit/assert.c | 79
-rw-r--r--  lib/kunit/debugfs.c | 116
-rw-r--r--  lib/kunit/debugfs.h | 30
-rw-r--r--  lib/kunit/kunit-test.c | 44
-rw-r--r--  lib/kunit/test.c | 148
-rw-r--r--  lib/list-test.c | 4
-rw-r--r--  lib/objagg.c | 4
-rw-r--r--  lib/percpu-refcount.c | 7
-rw-r--r--  lib/radix-tree.c | 8
-rw-r--r--  lib/raid6/.gitignore | 1
-rw-r--r--  lib/raid6/algos.c | 8
-rw-r--r--  lib/raid6/avx2.c | 4
-rw-r--r--  lib/raid6/recov_avx2.c | 6
-rw-r--r--  lib/raid6/recov_ssse3.c | 6
-rw-r--r--  lib/raid6/test/Makefile | 9
-rw-r--r--  lib/rbtree.c | 4
-rw-r--r--  lib/scatterlist.c | 2
-rw-r--r--  lib/stackdepot.c | 39
-rw-r--r--  lib/test_bitmap.c | 2
-rw-r--r--  lib/test_bpf.c | 4
-rw-r--r--  lib/test_firmware.c | 55
-rw-r--r--  lib/test_kasan.c | 19
-rw-r--r--  lib/test_kmod.c | 2
-rw-r--r--  lib/test_lockup.c | 599
-rw-r--r--  lib/test_min_heap.c | 194
-rw-r--r--  lib/test_stackinit.c | 28
-rw-r--r--  lib/test_xarray.c | 55
-rw-r--r--  lib/ts_bm.c | 2
-rw-r--r--  lib/ts_fsm.c | 2
-rw-r--r--  lib/ts_kmp.c | 2
-rw-r--r--  lib/ubsan.c | 47
-rw-r--r--  lib/uuid.c | 10
-rw-r--r--  lib/vdso/gettimeofday.c | 153
-rw-r--r--  lib/xarray.c | 9
46 files changed, 1695 insertions, 264 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index f2a39c9e5485..327cb2c7f2c9 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
gen_crc32table
gen_crc64table
crc32table.h
diff --git a/lib/Kconfig b/lib/Kconfig
index bc7e56370129..5d53f9609c25 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -615,6 +615,9 @@ config ARCH_HAS_PMEM_API
config MEMREGION
bool
+config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+ bool
+
# use memcpy to implement user copies for nommu architectures
config UACCESS_MEMCPY
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1458505192cd..971d6202c93a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -98,7 +98,7 @@ config DYNAMIC_DEBUG
bool "Enable dynamic printk() support"
default n
depends on PRINTK
- depends on DEBUG_FS
+ depends on (DEBUG_FS || PROC_FS)
help
Compiles debug level messages into the kernel, which would not
@@ -116,8 +116,9 @@ config DYNAMIC_DEBUG
Usage:
Dynamic debugging is controlled via the 'dynamic_debug/control' file,
- which is contained in the 'debugfs' filesystem. Thus, the debugfs
- filesystem must first be mounted before making use of this feature.
+ which is contained in the 'debugfs' filesystem or procfs.
+ Thus, the debugfs or procfs filesystem must first be mounted before
+ making use of this feature.
We refer to the control file as: <debugfs>/dynamic_debug/control. This
file contains a list of the debug statements that can be enabled. The
format for each line of the file is:
@@ -266,7 +267,7 @@ config ENABLE_MUST_CHECK
attribute warn_unused_result" messages.
config FRAME_WARN
- int "Warn for stack frames larger than (needs gcc 4.4)"
+ int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
@@ -276,7 +277,6 @@ config FRAME_WARN
Tell gcc to warn at build time for stack frames larger than this.
Setting this too low will cause a lot of warnings.
Setting it to 0 disables the warning.
- Requires gcc 4.4
config STRIP_ASM_SYMS
bool "Strip assembler-generated symbols during link"
@@ -305,18 +305,6 @@ config HEADERS_INSTALL
user-space program samples. It is also needed by some features such
as uapi header sanity checks.
-config OPTIMIZE_INLINING
- def_bool y
- help
- This option determines if the kernel forces gcc to inline the functions
- developers have marked 'inline'. Doing so takes away freedom from gcc to
- do what it thinks is best, which is desirable for the gcc 3.x series of
- compilers. The gcc 4.x series have a rewritten inlining algorithm and
- enabling this option will generate a smaller kernel there. Hopefully
- this algorithm is so good that allowing gcc 4.x and above to make the
- decision will become the default in the future. Until then this option
- is there to test gcc for this.
-
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
help
@@ -431,6 +419,16 @@ config MAGIC_SYSRQ_SERIAL
This option allows you to decide whether you want to enable the
magic SysRq key.
+config MAGIC_SYSRQ_SERIAL_SEQUENCE
+ string "Char sequence that enables magic SysRq over serial"
+ depends on MAGIC_SYSRQ_SERIAL
+ default ""
+ help
+ Specifies a sequence of characters that can follow BREAK to enable
+ SysRq on a serial console.
+
+ If unsure, leave an empty string and the option will not be enabled.
+
config DEBUG_FS
bool "Debug Filesystem"
help
@@ -978,6 +976,18 @@ config WQ_WATCHDOG
state. This can be configured through kernel parameter
"workqueue.watchdog_thresh" and its sysfs counterpart.
+config TEST_LOCKUP
+ tristate "Test module to generate lockups"
+ help
+ This builds the "test_lockup" module that helps to make sure
+ that watchdogs and lockup detectors are working properly.
+
+ Depending on module parameters it can emulate a soft or hard
+ lockup, a "hung task", or lock an arbitrary lock for a long time.
+ It can also generate a series of lockups with cooling-down periods.
+
+ If unsure, say N.
+
endmenu # "Debug lockups and hangs"
menu "Scheduler Debugging"
@@ -1086,6 +1096,23 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
+config PROVE_RAW_LOCK_NESTING
+ bool "Enable raw_spinlock - spinlock nesting checks"
+ depends on PROVE_LOCKING
+ default n
+ help
+ Enable the raw_spinlock vs. spinlock nesting checks which ensure
+ that the lock nesting rules for PREEMPT_RT enabled kernels are
+ not violated.
+
+ NOTE: There are known nesting problems. So if you enable this
+ option, expect lockdep splats until these problems have been fully
+ addressed, which is work in progress. This config switch allows one
+ to identify and analyze these problems. It will be removed and the
+ check permanently enabled once the main issues have been fixed.
+
+ If unsure, select N.
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
@@ -1630,7 +1657,7 @@ config FAILSLAB
Provide fault-injection capability for kmalloc.
config FAIL_PAGE_ALLOC
- bool "Fault-injection capabilitiy for alloc_pages()"
+ bool "Fault-injection capability for alloc_pages()"
depends on FAULT_INJECTION
help
Provide fault-injection capability for alloc_pages().
@@ -1771,6 +1798,16 @@ config TEST_LIST_SORT
If unsure, say N.
+config TEST_MIN_HEAP
+ tristate "Min heap test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on min heap function tests. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
config TEST_SORT
tristate "Array-based sort test"
depends on DEBUG_KERNEL || m
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 0e04fcb3ab3d..48469c95d78e 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -2,18 +2,50 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config UBSAN
+menuconfig UBSAN
bool "Undefined behaviour sanity checker"
help
- This option enables undefined behaviour sanity checker
+ This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
- behaviours in runtime. Various types of checks may be enabled
- via boot parameter ubsan_handle
- (see: Documentation/dev-tools/ubsan.rst).
+ behaviours at runtime. For more details, see:
+ Documentation/dev-tools/ubsan.rst
+
+if UBSAN
+
+config UBSAN_TRAP
+ bool "On Sanitizer warnings, abort the running kernel code"
+ depends on $(cc-option, -fsanitize-undefined-trap-on-error)
+ help
+ Building kernels with Sanitizer features enabled tends to grow
+ the kernel size by around 5%, due to adding all the debugging
+ text on failure paths. To avoid this, Sanitizer instrumentation
+ can just issue a trap. This reduces the kernel size overhead but
+ turns all warnings (including potentially harmless conditions)
+ into full exceptions that abort the running kernel code
+ (regardless of context, locks held, etc), which may destabilize
+ the system. For some system builders this is an acceptable
+ trade-off.
+
+config UBSAN_BOUNDS
+ bool "Perform array index bounds checking"
+ default UBSAN
+ help
+ This option enables detection of directly indexed out of bounds
+ array accesses, where the array size is known at compile time.
+ Note that this does not protect against array overflows via bad calls
+ to the {str,mem}*cpy() family of functions (that is addressed
+ by CONFIG_FORTIFY_SOURCE).
+
+config UBSAN_MISC
+ bool "Enable all other Undefined Behavior sanity checks"
+ default UBSAN
+ help
+ This option enables all sanity checks that don't have their
+ own Kconfig options. Disable this if you only want to have
+ individually selected checks.
config UBSAN_SANITIZE_ALL
bool "Enable instrumentation for the entire kernel"
- depends on UBSAN
depends on ARCH_HAS_UBSAN_SANITIZE_ALL
# We build with -Wno-maybe-uninitialized, but we still want to
@@ -30,7 +62,6 @@ config UBSAN_SANITIZE_ALL
config UBSAN_NO_ALIGNMENT
bool "Disable checking of pointers alignment"
- depends on UBSAN
default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
help
This option disables the check of unaligned memory accesses.
@@ -43,7 +74,9 @@ config UBSAN_ALIGNMENT
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
- depends on m && UBSAN
+ depends on m
help
This is a test module for UBSAN.
It triggers various undefined behavior, and detects it.
+
+endif # if UBSAN
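
UBSAN_BOUNDS only covers direct indexing of arrays whose size is known at
compile time. A minimal sketch of the class of bug it reports, assuming a
hypothetical demo module (not part of this patch):

	// Hypothetical module: with CONFIG_UBSAN_BOUNDS=y the store below
	// produces a UBSAN "index out of range" report at runtime.
	#include <linux/module.h>

	static int vals[4];	/* size known at compile time */

	static int __init ubsan_bounds_demo_init(void)
	{
		volatile int i = 4;	/* defeat constant folding */

		vals[i] = 1;		/* one past the end */
		return 0;
	}
	module_init(ubsan_bounds_demo_init);
	MODULE_LICENSE("GPL");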
diff --git a/lib/Makefile b/lib/Makefile
index f19b85c87fda..ab68a8674360 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -70,6 +70,7 @@ CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
@@ -89,9 +90,11 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
+CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@ -223,6 +226,10 @@ obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+# stackdepot.c should not be instrumented or call instrumented functions.
+# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
+# file.
+CFLAGS_stackdepot.o += -fno-builtin
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
@@ -282,7 +289,9 @@ quiet_cmd_build_OID_registry = GEN $@
clean-files += oid_registry_data.c
obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+ifneq ($(CONFIG_UBSAN_TRAP),y)
obj-$(CONFIG_UBSAN) += ubsan.o
+endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
diff --git a/lib/bch.c b/lib/bch.c
index 5db6d3a4c8a6..052d3fb753a0 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -102,7 +102,7 @@
*/
struct gf_poly {
unsigned int deg; /* polynomial degree */
- unsigned int c[0]; /* polynomial terms */
+ unsigned int c[]; /* polynomial terms */
};
/* given its degree, compute a polynomial size in bytes */
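
The c[0] to c[] change converts a GNU zero-length array into a C99 flexible
array member; sizing the allocation remains the caller's job. A minimal
sketch of that pattern (gf_poly_alloc() is a hypothetical helper, not the
allocation path bch.c actually uses):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct gf_poly *gf_poly_alloc(unsigned int deg, gfp_t gfp)
	{
		/* deg + 1 terms for degree deg; struct_size() guards overflow */
		struct gf_poly *p = kzalloc(struct_size(p, c, deg + 1), gfp);

		if (p)
			p->deg = deg;
		return p;
	}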
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index ec3ce7fd299f..912ef4921398 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -29,12 +29,14 @@ static int xbc_node_num __initdata;
static char *xbc_data __initdata;
static size_t xbc_data_size __initdata;
static struct xbc_node *last_parent __initdata;
+static const char *xbc_err_msg __initdata;
+static int xbc_err_pos __initdata;
static int __init xbc_parse_error(const char *msg, const char *p)
{
- int pos = p - xbc_data;
+ xbc_err_msg = msg;
+ xbc_err_pos = (int)(p - xbc_data);
- pr_err("Parse error at pos %d: %s\n", pos, msg);
return -EINVAL;
}
@@ -738,33 +740,44 @@ void __init xbc_destroy_all(void)
/**
* xbc_init() - Parse given XBC file and build XBC internal tree
* @buf: boot config text
+ * @emsg: A pointer to a const char * to store the error message
+ * @epos: A pointer to an int to store the error position
*
* This parses the boot config text in @buf. @buf must be a
* null terminated string and smaller than XBC_DATA_MAX.
* Return the number of stored nodes (>0) if succeeded, or -errno
* if there is any error.
+ * In error cases, @emsg will be updated with an error message and
+ * @epos will be updated with the error position, which is the byte offset
+ * in @buf. If the error is not a parser error, @epos will be -1.
*/
-int __init xbc_init(char *buf)
+int __init xbc_init(char *buf, const char **emsg, int *epos)
{
char *p, *q;
int ret, c;
+ if (epos)
+ *epos = -1;
+
if (xbc_data) {
- pr_err("Error: bootconfig is already initialized.\n");
+ if (emsg)
+ *emsg = "Bootconfig is already initialized";
return -EBUSY;
}
ret = strlen(buf);
if (ret > XBC_DATA_MAX - 1 || ret == 0) {
- pr_err("Error: Config data is %s.\n",
- ret ? "too big" : "empty");
+ if (emsg)
+ *emsg = ret ? "Config data is too big" :
+ "Config data is empty";
return -ERANGE;
}
xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
SMP_CACHE_BYTES);
if (!xbc_nodes) {
- pr_err("Failed to allocate memory for bootconfig nodes.\n");
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -814,9 +827,13 @@ int __init xbc_init(char *buf)
if (!ret)
ret = xbc_verify_tree();
- if (ret < 0)
+ if (ret < 0) {
+ if (epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
xbc_destroy_all();
- else
+ } else
ret = xbc_node_num;
return ret;
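
With the new signature, printing the error becomes the caller's job. A
sketch of how a caller might consume @emsg/@epos (hypothetical snippet; the
in-tree caller lives in init/main.c):

	const char *msg;
	int pos, ret;

	ret = xbc_init(buf, &msg, &pos);
	if (ret < 0) {
		if (pos < 0)	/* not a parser error */
			pr_err("Failed to init bootconfig: %s (%d)\n", msg, ret);
		else
			pr_err("Parse error at pos %d: %s\n", pos, msg);
	}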
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 0cb672eb107c..fb22fb266f93 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -232,3 +232,32 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
+
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
+
+/**
+ * Returns an arbitrary cpu within src1p & src2p.
+ *
+ * Iterated calls using the same src1p and src2p will be distributed within
+ * their intersection.
+ *
+ * Returns >= nr_cpu_ids if the intersection is empty.
+ */
+int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = cpumask_next_and(prev, src1p, src2p);
+ if (next >= nr_cpu_ids)
+ next = cpumask_first_and(src1p, src2p);
+
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_and_distribute);
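
A usage sketch: repeated calls rotate through the masks' intersection
instead of always returning the first CPU. Here mask1, mask2, wq and work
are hypothetical; queue_work_on() is the real workqueue API:

	int cpu = cpumask_any_and_distribute(mask1, mask2);

	if (cpu < nr_cpu_ids)		/* intersection was non-empty */
		queue_work_on(cpu, wq, &work);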
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
index c391a91364e9..fa43deda2660 100644
--- a/lib/crypto/chacha20poly1305-selftest.c
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -9028,10 +9028,15 @@ bool __init chacha20poly1305_selftest(void)
&& total_len <= 1 << 10; ++total_len) {
for (i = 0; i <= total_len; ++i) {
for (j = i; j <= total_len; ++j) {
+ k = 0;
sg_init_table(sg_src, 3);
- sg_set_buf(&sg_src[0], input, i);
- sg_set_buf(&sg_src[1], input + i, j - i);
- sg_set_buf(&sg_src[2], input + j, total_len - j);
+ if (i)
+ sg_set_buf(&sg_src[k++], input, i);
+ if (j - i)
+ sg_set_buf(&sg_src[k++], input + i, j - i);
+ if (total_len - j)
+ sg_set_buf(&sg_src[k++], input + j, total_len - j);
+ sg_init_marker(sg_src, k);
memset(computed_output, 0, total_len);
memset(input, 0, total_len);
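
The fix skips zero-length segments and uses sg_init_marker() to terminate
the scatterlist after only the entries actually filled in. The same pattern
in isolation (a sketch; buf1/buf2/len1/len2 are hypothetical, and at least
one segment is assumed non-empty, since sg_init_marker(sg, 0) is invalid):

	struct scatterlist sg[2];
	unsigned int k = 0;

	sg_init_table(sg, ARRAY_SIZE(sg));
	if (len1)
		sg_set_buf(&sg[k++], buf1, len1);
	if (len2)
		sg_set_buf(&sg[k++], buf2, len2);
	sg_init_marker(sg, k);	/* end marker at entry k - 1 */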
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c60409138e13..8f199f403ab5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -876,6 +876,14 @@ static const struct file_operations ddebug_proc_fops = {
.write = ddebug_proc_write
};
+static const struct proc_ops proc_fops = {
+ .proc_open = ddebug_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = ddebug_proc_write
+};
+
/*
* Allocate a new ddebug_table for the given module
* and add it to the global list.
@@ -991,15 +999,25 @@ static void ddebug_remove_all_tables(void)
static __initdata int ddebug_init_success;
-static int __init dynamic_debug_init_debugfs(void)
+static int __init dynamic_debug_init_control(void)
{
- struct dentry *dir;
+ struct proc_dir_entry *procfs_dir;
+ struct dentry *debugfs_dir;
if (!ddebug_init_success)
return -ENODEV;
- dir = debugfs_create_dir("dynamic_debug", NULL);
- debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+ /* Create the control file in debugfs if it is enabled */
+ if (debugfs_initialized()) {
+ debugfs_dir = debugfs_create_dir("dynamic_debug", NULL);
+ debugfs_create_file("control", 0644, debugfs_dir, NULL,
+ &ddebug_proc_fops);
+ }
+
+ /* Also create the control file in procfs */
+ procfs_dir = proc_mkdir("dynamic_debug", NULL);
+ if (procfs_dir)
+ proc_create("control", 0644, procfs_dir, &proc_fops);
return 0;
}
@@ -1013,7 +1031,7 @@ static int __init dynamic_debug_init(void)
int n = 0, entries = 0, modct = 0;
int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
+ if (&__start___verbose == &__stop___verbose) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
}
@@ -1077,4 +1095,4 @@ out_err:
early_initcall(dynamic_debug_init);
/* Debugfs setup must be done later */
-fs_initcall(dynamic_debug_init_debugfs);
+fs_initcall(dynamic_debug_init_control);
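
Since v5.6, procfs entries take a struct proc_ops rather than a struct
file_operations, which is why the control file is registered twice above
with two different ops tables. A minimal proc_ops sketch with hypothetical
names:

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "hello\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct proc_ops demo_proc_ops = {
		.proc_open	= demo_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};

	/* proc_create("demo", 0444, NULL, &demo_proc_ops); */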
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 065aa16f448b..95d12e3d6d95 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -14,6 +14,14 @@ menuconfig KUNIT
if KUNIT
+config KUNIT_DEBUGFS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation"
+ help
+ Enable debugfs representation for kunit. Currently this consists
+ of /sys/kernel/debug/kunit/<test_suite>/results files for each
+ test suite, which allow users to see the results of the last run
+ of each test suite.
+
config KUNIT_TEST
tristate "KUnit test for KUnit"
help
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index fab55649b69a..724b94311ca3 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -5,6 +5,10 @@ kunit-objs += test.o \
assert.o \
try-catch.o
+ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
+kunit-objs += debugfs.o
+endif
+
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
# string-stream-test compiles built-in only.
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index b24bebca052d..33acdaa28a7d 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -6,6 +6,7 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
#include <kunit/assert.h>
+#include <kunit/test.h>
#include "string-stream.h"
@@ -53,12 +54,12 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (unary_assert->expected_true)
string_stream_add(stream,
- "\tExpected %s to be true, but is false\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
+ unary_assert->condition);
else
string_stream_add(stream,
- "\tExpected %s to be false, but is true\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
+ unary_assert->condition);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
@@ -72,13 +73,13 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (!ptr_assert->value) {
string_stream_add(stream,
- "\tExpected %s is not null, but is\n",
- ptr_assert->text);
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ ptr_assert->text);
} else if (IS_ERR(ptr_assert->value)) {
string_stream_add(stream,
- "\tExpected %s is not error, but is: %ld\n",
- ptr_assert->text,
- PTR_ERR(ptr_assert->value));
+ KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
}
kunit_assert_print_msg(assert, stream);
}
@@ -92,16 +93,16 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %lld\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %lld",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
@@ -114,16 +115,16 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %pK\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %pK",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
@@ -136,16 +137,16 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %s\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %s",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
new file mode 100644
index 000000000000..9214c493d8b7
--- /dev/null
+++ b/lib/kunit/debugfs.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ * Author: Alan Maguire <alan.maguire@oracle.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include <kunit/test.h>
+
+#include "string-stream.h"
+
+#define KUNIT_DEBUGFS_ROOT "kunit"
+#define KUNIT_DEBUGFS_RESULTS "results"
+
+/*
+ * Create a debugfs representation of test suites:
+ *
+ * Path Semantics
+ * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
+ * testsuite
+ *
+ */
+
+static struct dentry *debugfs_rootdir;
+
+void kunit_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs_rootdir);
+}
+
+void kunit_debugfs_init(void)
+{
+ if (!debugfs_rootdir)
+ debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
+}
+
+static void debugfs_print_result(struct seq_file *seq,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ if (!test_case || !test_case->log)
+ return;
+
+ seq_printf(seq, "%s", test_case->log);
+}
+
+/*
+ * /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
+ */
+static int debugfs_print_results(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+ bool success = kunit_suite_has_succeeded(suite);
+ struct kunit_case *test_case;
+
+ if (!suite || !suite->log)
+ return 0;
+
+ seq_printf(seq, "%s", suite->log);
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ debugfs_print_result(seq, suite, test_case);
+
+ seq_printf(seq, "%s %d - %s\n",
+ kunit_status_to_string(success), 1, suite->name);
+ return 0;
+}
+
+static int debugfs_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static int debugfs_results_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_results, suite);
+}
+
+static const struct file_operations debugfs_results_fops = {
+ .open = debugfs_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ /* Allocate logs before creating debugfs representation. */
+ suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+ kunit_suite_for_each_test_case(suite, test_case)
+ test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+
+ suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
+
+ debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
+ suite->debugfs,
+ suite, &debugfs_results_fops);
+}
+
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ debugfs_remove_recursive(suite->debugfs);
+ kfree(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ kfree(test_case->log);
+}
diff --git a/lib/kunit/debugfs.h b/lib/kunit/debugfs.h
new file mode 100644
index 000000000000..dcc7d7556107
--- /dev/null
+++ b/lib/kunit/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _KUNIT_DEBUGFS_H
+#define _KUNIT_DEBUGFS_H
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite);
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite);
+void kunit_debugfs_init(void);
+void kunit_debugfs_cleanup(void);
+
+#else
+
+static inline void kunit_debugfs_create_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_destroy_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_init(void) { }
+
+static inline void kunit_debugfs_cleanup(void) { }
+
+#endif /* CONFIG_KUNIT_DEBUGFS */
+
+#endif /* _KUNIT_DEBUGFS_H */
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index ccb8d2e332f7..4f3d36a72f8f 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -134,7 +134,7 @@ static void kunit_resource_test_init_resources(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
- kunit_init_test(&ctx->test, "testing_test_init_test");
+ kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
@@ -301,7 +301,7 @@ static int kunit_resource_test_init(struct kunit *test)
test->priv = ctx;
- kunit_init_test(&ctx->test, "test_test_context");
+ kunit_init_test(&ctx->test, "test_test_context", NULL);
return 0;
}
@@ -329,6 +329,44 @@ static struct kunit_suite kunit_resource_test_suite = {
.exit = kunit_resource_test_exit,
.test_cases = kunit_resource_test_cases,
};
-kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite);
+
+static void kunit_log_test(struct kunit *test);
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
+static void kunit_log_test(struct kunit *test)
+{
+ struct kunit_suite *suite = &kunit_log_test_suite;
+
+ kunit_log(KERN_INFO, test, "put this in log.");
+ kunit_log(KERN_INFO, test, "this too.");
+ kunit_log(KERN_INFO, suite, "add to suite log.");
+ kunit_log(KERN_INFO, suite, "along with this.");
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "put this in log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "this too."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "add to suite log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "along with this."));
+#else
+ KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
+#endif
+}
+
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
+ &kunit_log_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 9242f932896c..7a6430a7fca0 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/sched/debug.h>
+#include "debugfs.h"
#include "string-stream.h"
#include "try-catch-impl.h"
@@ -28,73 +29,117 @@ static void kunit_print_tap_version(void)
}
}
-static size_t kunit_test_cases_len(struct kunit_case *test_cases)
+/*
+ * Append a formatted message to @log, whose size is limited to
+ * KUNIT_LOG_SIZE bytes (including the null terminating byte).
+ */
+void kunit_log_append(char *log, const char *fmt, ...)
+{
+ char line[KUNIT_LOG_SIZE];
+ va_list args;
+ int len_left;
+
+ if (!log)
+ return;
+
+ len_left = KUNIT_LOG_SIZE - strlen(log) - 1;
+ if (len_left <= 0)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(line, sizeof(line), fmt, args);
+ va_end(args);
+
+ strncat(log, line, len_left);
+}
+EXPORT_SYMBOL_GPL(kunit_log_append);
+
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
{
struct kunit_case *test_case;
size_t len = 0;
- for (test_case = test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case)
len++;
return len;
}
+EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_subtest_start(struct kunit_suite *suite)
{
kunit_print_tap_version();
- pr_info("\t# Subtest: %s\n", suite->name);
- pr_info("\t1..%zd\n", kunit_test_cases_len(suite->test_cases));
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
+ suite->name);
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
+ kunit_suite_num_test_cases(suite));
}
-static void kunit_print_ok_not_ok(bool should_indent,
+static void kunit_print_ok_not_ok(void *test_or_suite,
+ bool is_test,
bool is_ok,
size_t test_number,
const char *description)
{
- const char *indent, *ok_not_ok;
-
- if (should_indent)
- indent = "\t";
- else
- indent = "";
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit *test = is_test ? test_or_suite : NULL;
- if (is_ok)
- ok_not_ok = "ok";
+ /*
+ * We do not log the test suite results as doing so would
+ * mean the debugfs display would consist of the test suite
+ * description and status prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite status for the debugfs
+ * representation.
+ */
+ if (suite)
+ pr_info("%s %zd - %s",
+ kunit_status_to_string(is_ok),
+ test_number, description);
else
- ok_not_ok = "not ok";
-
- pr_info("%s%s %zd - %s\n", indent, ok_not_ok, test_number, description);
+ kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
+ kunit_status_to_string(is_ok),
+ test_number, description);
}
-static bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+bool kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
- for (test_case = suite->test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case) {
if (!test_case->success)
return false;
+ }
return true;
}
+EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static void kunit_print_subtest_end(struct kunit_suite *suite)
{
static size_t kunit_suite_counter = 1;
- kunit_print_ok_not_ok(false,
+ kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
suite->name);
}
-static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
- size_t test_number)
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case)
{
- kunit_print_ok_not_ok(true,
- test_case->success,
- test_number,
- test_case->name);
+ struct kunit_case *tc;
+ unsigned int i = 1;
+
+ kunit_suite_for_each_test_case(suite, tc) {
+ if (tc == test_case)
+ return i;
+ i++;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(kunit_test_case_num);
static void kunit_print_string_stream(struct kunit *test,
struct string_stream *stream)
@@ -102,6 +147,9 @@ static void kunit_print_string_stream(struct kunit *test,
struct string_stream_fragment *fragment;
char *buf;
+ if (string_stream_is_empty(stream))
+ return;
+
buf = string_stream_get_string(stream);
if (!buf) {
kunit_err(test,
@@ -175,11 +223,14 @@ void kunit_do_assertion(struct kunit *test,
}
EXPORT_SYMBOL_GPL(kunit_do_assertion);
-void kunit_init_test(struct kunit *test, const char *name)
+void kunit_init_test(struct kunit *test, const char *name, char *log)
{
spin_lock_init(&test->lock);
INIT_LIST_HEAD(&test->resources);
test->name = name;
+ test->log = log;
+ if (test->log)
+ test->log[0] = '\0';
test->success = true;
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -290,7 +341,7 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
struct kunit_try_catch *try_catch;
struct kunit test;
- kunit_init_test(&test, test_case->name);
+ kunit_init_test(&test, test_case->name, test_case->log);
try_catch = &test.try_catch;
kunit_try_catch_init(try_catch,
@@ -303,19 +354,20 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
kunit_try_catch_run(try_catch, &context);
test_case->success = test.success;
+
+ kunit_print_ok_not_ok(&test, true, test_case->success,
+ kunit_test_case_num(suite, test_case),
+ test_case->name);
}
int kunit_run_tests(struct kunit_suite *suite)
{
struct kunit_case *test_case;
- size_t test_case_count = 1;
kunit_print_subtest_start(suite);
- for (test_case = suite->test_cases; test_case->run_case; test_case++) {
+ kunit_suite_for_each_test_case(suite, test_case)
kunit_run_case_catch_errors(suite, test_case);
- kunit_print_test_case_ok_not_ok(test_case, test_case_count++);
- }
kunit_print_subtest_end(suite);
@@ -323,6 +375,37 @@ int kunit_run_tests(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_run_tests);
+static void kunit_init_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_create_suite(suite);
+}
+
+int __kunit_test_suites_init(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++) {
+ kunit_init_suite(suites[i]);
+ kunit_run_tests(suites[i]);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
+
+static void kunit_exit_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_destroy_suite(suite);
+}
+
+void __kunit_test_suites_exit(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++)
+ kunit_exit_suite(suites[i]);
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
kunit_resource_init_t init,
kunit_resource_free_t free,
@@ -489,12 +572,15 @@ EXPORT_SYMBOL_GPL(kunit_cleanup);
static int __init kunit_init(void)
{
+ kunit_debugfs_init();
+
return 0;
}
late_initcall(kunit_init);
static void __exit kunit_exit(void)
{
+ kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
diff --git a/lib/list-test.c b/lib/list-test.c
index 76babb1df889..ee09505df16f 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -659,7 +659,7 @@ static void list_test_list_for_each_prev_safe(struct kunit *test)
static void list_test_list_for_each_entry(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
@@ -680,7 +680,7 @@ static void list_test_list_for_each_entry(struct kunit *test)
static void list_test_list_for_each_entry_reverse(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
diff --git a/lib/objagg.c b/lib/objagg.c
index 55621fb82e0a..5e1676ccdadd 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -28,7 +28,7 @@ struct objagg_hints_node {
struct objagg_hints_node *parent;
unsigned int root_id;
struct objagg_obj_stats_info stats_info;
- unsigned long obj[0];
+ unsigned long obj[];
};
static struct objagg_hints_node *
@@ -66,7 +66,7 @@ struct objagg_obj {
* including nested objects
*/
struct objagg_obj_stats stats;
- unsigned long obj[0];
+ unsigned long obj[];
};
static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 4f6c6ebbbbde..8d092609928e 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -50,9 +50,10 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
- * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1). See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
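
The three start states spelled out in the reworded comment, as a sketch
(ref and demo_release are hypothetical, error handling elided):

	#include <linux/percpu-refcount.h>

	static struct percpu_ref ref;	/* hypothetical */

	static void demo_release(struct percpu_ref *r)
	{
		/* runs once the count drops to zero after percpu_ref_kill() */
	}

	/* percpu mode, count starts at 1: */
	percpu_ref_init(&ref, demo_release, 0, GFP_KERNEL);

	/* atomic mode, count starts at 1: */
	percpu_ref_init(&ref, demo_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);

	/* atomic mode, count starts at 0, dead until percpu_ref_reinit(): */
	percpu_ref_init(&ref, demo_release, PERCPU_REF_INIT_DEAD, GFP_KERNEL);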
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8fa1d274530..2ee6ae3b0ade 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -56,14 +56,6 @@ struct kmem_cache *radix_tree_node_cachep;
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-
-/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 3de0d8921286..6be57745afd1 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mktables
altivec*.c
int*.c
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index bf1b4765c8f6..6d5e5000fdd7 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,10 +34,8 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x2,
&raid6_sse2x1,
&raid6_sse1x2,
@@ -51,11 +49,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x4,
&raid6_sse2x2,
&raid6_sse2x1,
@@ -97,13 +93,11 @@ void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
+#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_recov_avx2,
-#endif
-#ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 87184b6da28a..f299476e1d76 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -13,8 +13,6 @@
*
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -470,5 +468,3 @@ const struct raid6_calls raid6_avx2x4 = {
1 /* Has cache hints */
};
#endif
-
-#endif /* CONFIG_AS_AVX2 */
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index 7a3b5e7f66ee..4e8095403ee2 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -4,8 +4,6 @@
* Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -313,7 +311,3 @@ const struct raid6_recov_calls raid6_recov_avx2 = {
#endif
.priority = 2,
};
-
-#else
-#warning "your version of binutils lacks AVX2 support"
-#endif
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 1de97d2405d0..4bfa3c6b60de 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -3,8 +3,6 @@
* Copyright (C) 2012 Intel Corporation
*/
-#ifdef CONFIG_AS_SSSE3
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -328,7 +326,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
#endif
.priority = 1,
};
-
-#else
-#warning "your version of binutils lacks SSSE3 support"
-#endif
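
Dropping the CONFIG_AS_AVX2/CONFIG_AS_SSSE3 guards assumes the minimum
supported assembler can always emit these instructions, so only the runtime
CPU-feature check remains. A sketch of that gate (the real checks live in
the .valid hooks, e.g. in lib/raid6/avx2.c):

	#ifdef CONFIG_X86
	#include <asm/cpufeature.h>

	static int have_avx2(void)
	{
		return boot_cpu_has(X86_FEATURE_AVX2) &&
		       boot_cpu_has(X86_FEATURE_AVX);
	}
	#endif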
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 3ab8720aa2f8..a4c7cd74cff5 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -34,14 +34,9 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
- CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
- CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+ CFLAGS += -DCONFIG_X86
CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >&/dev/null && \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
diff --git a/lib/rbtree.c b/lib/rbtree.c
index abc86c6a3177..8545872e61db 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -503,7 +503,7 @@ struct rb_node *rb_next(const struct rb_node *node)
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
- node=node->rb_left;
+ node = node->rb_left;
return (struct rb_node *)node;
}
@@ -535,7 +535,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
- node=node->rb_right;
+ node = node->rb_right;
return (struct rb_node *)node;
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5813072bc589..5d63a8857f36 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL(sg_miter_stop);
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
* @to_buffer: transfer direction (true == from an sg list to a
- * buffer, false == from a buffer to an sg list
+ * buffer, false == from a buffer to an sg list)
*
* Returns the number of copied bytes.
*
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 81c69c08d1d1..2caffc64e4c8 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -20,6 +20,7 @@
*/
#include <linux/gfp.h>
+#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -202,9 +203,20 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
- void *slab = stack_slabs[parts.slabindex];
+ void *slab;
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack = slab + offset;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ if (parts.slabindex > depot_index) {
+ WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
+ parts.slabindex, depot_index, handle);
+ return 0;
+ }
+ slab = stack_slabs[parts.slabindex];
+ if (!slab)
+ return 0;
+ stack = slab + offset;
*entries = stack->entries;
return stack->size;
@@ -305,3 +317,26 @@ fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+static inline int in_irqentry_text(unsigned long ptr)
+{
+ return (ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end) ||
+ (ptr >= (unsigned long)&__softirqentry_text_start &&
+ ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+unsigned int filter_irq_stacks(unsigned long *entries,
+ unsigned int nr_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (in_irqentry_text(entries[i])) {
+ /* Include the irqentry function into the stack. */
+ return i + 1;
+ }
+ }
+ return nr_entries;
+}
+EXPORT_SYMBOL_GPL(filter_irq_stacks);
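
A sketch putting the pieces together, assuming the caller wants identical
stacks to hash identically regardless of interrupt context (hence the trim
before saving):

	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	unsigned long entries[64];
	unsigned long *saved;
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr = filter_irq_stacks(entries, nr);	/* keep only the in-irq part */
	handle = stack_depot_save(entries, nr, GFP_NOWAIT);

	/* later, e.g. in a report path: */
	nr = stack_depot_fetch(handle, &saved);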
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 61ed71c1daba..6b13150667f5 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -278,6 +278,8 @@ static void __init test_replace(void)
unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
DECLARE_BITMAP(bmap, 1024);
+ BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2);
+
bitmap_zero(bmap, 1024);
bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
expect_eq_bitmap(bmap, exp3_0_1, nbits);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index cecb230833be..a5fddf9ebcb7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6660,14 +6660,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- preempt_disable();
+ migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
- preempt_enable();
+ migrate_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 251213c872b5..0c7fbcf07ac5 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
+#include <linux/efi_embedded_fw.h>
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
@@ -507,6 +508,57 @@ out:
}
static DEVICE_ATTR_WO(trigger_request);
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+static ssize_t trigger_request_platform_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const u8 test_data[] = {
+ 0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
+ 0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
+ 0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
+ 0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
+ };
+ struct efi_embedded_fw efi_embedded_fw;
+ const struct firmware *firmware = NULL;
+ char *name;
+ int rc;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOSPC;
+
+ pr_info("inserting test platform fw '%s'\n", name);
+ efi_embedded_fw.name = name;
+ efi_embedded_fw.data = (void *)test_data;
+ efi_embedded_fw.length = sizeof(test_data);
+ list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+
+ pr_info("loading '%s'\n", name);
+ rc = firmware_request_platform(&firmware, name, dev);
+ if (rc) {
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ goto out;
+ }
+ if (firmware->size != sizeof(test_data) ||
+ memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
+ pr_info("firmware contents mismatch for '%s'\n", name);
+ rc = -EINVAL;
+ goto out;
+ }
+ pr_info("loaded: %zu\n", firmware->size);
+ rc = count;
+
+out:
+ release_firmware(firmware);
+ list_del(&efi_embedded_fw.list);
+ kfree(name);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_request_platform);
+#endif
+
static DECLARE_COMPLETION(async_fw_done);
static void trigger_async_request_cb(const struct firmware *fw, void *context)
@@ -903,6 +955,9 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(trigger_request),
TEST_FW_DEV_ATTR(trigger_async_request),
TEST_FW_DEV_ATTR(trigger_custom_fallback),
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+ TEST_FW_DEV_ATTR(trigger_request_platform),
+#endif
/* These use the config and can use the test_result */
TEST_FW_DEV_ATTR(trigger_batched_requests),
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 3872d250ed2c..e3087d90e00d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -285,6 +285,24 @@ static noinline void __init kmalloc_oob_in_memset(void)
kfree(ptr);
}
+static noinline void __init kmalloc_memmove_invalid_size(void)
+{
+ char *ptr;
+ size_t size = 64;
+ volatile size_t invalid_size = -2;
+
+ pr_info("invalid size in memmove\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ memset((char *)ptr, 0, 64);
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size);
+ kfree(ptr);
+}
+
static noinline void __init kmalloc_uaf(void)
{
char *ptr;
@@ -799,6 +817,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_oob_memset_4();
kmalloc_oob_memset_8();
kmalloc_oob_memset_16();
+ kmalloc_memmove_invalid_size();
kmalloc_uaf();
kmalloc_uaf_memset();
kmalloc_uaf2();
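
The new test relies on size_t being unsigned: -2 wraps to SIZE_MAX - 1, so
the memmove() length is enormous and KASAN flags the access. A plain C
illustration of the wrap (userspace, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t invalid = -2;	/* wraps to SIZE_MAX - 1 */

		printf("%zu\n", invalid);	/* 18446744073709551614 on 64-bit */
		return 0;
	}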
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..e651c37d56db 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -204,7 +204,7 @@ static void test_kmod_put_module(struct kmod_test_device_info *info)
case TEST_KMOD_DRIVER:
break;
case TEST_KMOD_FS_TYPE:
- if (info && info->fs_sync && info->fs_sync->owner)
+ if (info->fs_sync && info->fs_sync->owner)
module_put(info->fs_sync->owner);
break;
default:
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
new file mode 100644
index 000000000000..ea09ca335b21
--- /dev/null
+++ b/lib/test_lockup.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module to generate lockups
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static unsigned int time_secs;
+module_param(time_secs, uint, 0600);
+MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");
+
+static unsigned int time_nsecs;
+module_param(time_nsecs, uint, 0600);
+MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");
+
+static unsigned int cooldown_secs;
+module_param(cooldown_secs, uint, 0600);
+MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");
+
+static unsigned int cooldown_nsecs;
+module_param(cooldown_nsecs, uint, 0600);
+MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0600);
+MODULE_PARM_DESC(iterations, "lockup iterations, default 1");
+
+static bool all_cpus;
+module_param(all_cpus, bool, 0400);
+MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");
+
+static int wait_state;
+static char *state = "R";
+module_param(state, charp, 0400);
+MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");
+
+static bool use_hrtimer;
+module_param(use_hrtimer, bool, 0400);
+MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");
+
+static bool iowait;
+module_param(iowait, bool, 0400);
+MODULE_PARM_DESC(iowait, "account sleep time as iowait");
+
+static bool lock_read;
+module_param(lock_read, bool, 0400);
+MODULE_PARM_DESC(lock_read, "lock read-write locks for read");
+
+static bool lock_single;
+module_param(lock_single, bool, 0400);
+MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");
+
+static bool reacquire_locks;
+module_param(reacquire_locks, bool, 0400);
+MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
+
+static bool touch_softlockup;
+module_param(touch_softlockup, bool, 0600);
+MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");
+
+static bool touch_hardlockup;
+module_param(touch_hardlockup, bool, 0600);
+MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");
+
+static bool call_cond_resched;
+module_param(call_cond_resched, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
+
+static bool measure_lock_wait;
+module_param(measure_lock_wait, bool, 0400);
+MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
+
+static unsigned long lock_wait_threshold = ULONG_MAX;
+module_param(lock_wait_threshold, ulong, 0400);
+MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");
+
+static bool test_disable_irq;
+module_param_named(disable_irq, test_disable_irq, bool, 0400);
+MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");
+
+static bool disable_softirq;
+module_param(disable_softirq, bool, 0400);
+MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");
+
+static bool disable_preempt;
+module_param(disable_preempt, bool, 0400);
+MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");
+
+static bool lock_rcu;
+module_param(lock_rcu, bool, 0400);
+MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
+
+static bool lock_mmap_sem;
+module_param(lock_mmap_sem, bool, 0400);
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_sem: block procfs interfaces");
+
+static unsigned long lock_rwsem_ptr;
+module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");
+
+static unsigned long lock_mutex_ptr;
+module_param_unsafe(lock_mutex_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");
+
+static unsigned long lock_spinlock_ptr;
+module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");
+
+static unsigned long lock_rwlock_ptr;
+module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");
+
+static unsigned int alloc_pages_nr;
+module_param_unsafe(alloc_pages_nr, uint, 0600);
+MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");
+
+static unsigned int alloc_pages_order;
+module_param(alloc_pages_order, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");
+
+static gfp_t alloc_pages_gfp = GFP_KERNEL;
+module_param_unsafe(alloc_pages_gfp, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");
+
+static bool alloc_pages_atomic;
+module_param(alloc_pages_atomic, bool, 0400);
+MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");
+
+static bool reallocate_pages;
+module_param(reallocate_pages, bool, 0400);
+MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
+
+struct file *test_file;
+struct inode *test_inode;
+static char test_file_path[256];
+module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
+MODULE_PARM_DESC(file_path, "file path to test");
+
+static bool test_lock_inode;
+module_param_named(lock_inode, test_lock_inode, bool, 0400);
+MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");
+
+static bool test_lock_mapping;
+module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
+MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");
+
+static bool test_lock_sb_umount;
+module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
+MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
+
+static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
+
+static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+
+static struct task_struct *main_task;
+static int master_cpu;
+
+static void test_lock(bool master, bool verbose)
+{
+ u64 uninitialized_var(wait_start);
+
+ if (measure_lock_wait)
+ wait_start = local_clock();
+
+ if (lock_mutex_ptr && master) {
+ if (verbose)
+ pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
+ mutex_lock((struct mutex *)lock_mutex_ptr);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (verbose)
+ pr_notice("lock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ if (lock_read)
+ down_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ down_write((struct rw_semaphore *)lock_rwsem_ptr);
+ }
+
+ if (lock_mmap_sem && master) {
+ if (verbose)
+ pr_notice("lock mmap_sem pid=%d\n", main_task->pid);
+ if (lock_read)
+ down_read(&main_task->mm->mmap_sem);
+ else
+ down_write(&main_task->mm->mmap_sem);
+ }
+
+ if (test_disable_irq)
+ local_irq_disable();
+
+ if (disable_softirq)
+ local_bh_disable();
+
+ if (disable_preempt)
+ preempt_disable();
+
+ if (lock_rcu)
+ rcu_read_lock();
+
+ if (lock_spinlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ spin_lock((spinlock_t *)lock_spinlock_ptr);
+ }
+
+ if (lock_rwlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ if (lock_read)
+ read_lock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_lock((rwlock_t *)lock_rwlock_ptr);
+ }
+
+ if (measure_lock_wait) {
+ s64 cur_wait = local_clock() - wait_start;
+ s64 max_wait = atomic64_read(&max_lock_wait);
+
+ do {
+ if (cur_wait < max_wait)
+ break;
+ max_wait = atomic64_cmpxchg(&max_lock_wait,
+ max_wait, cur_wait);
+ } while (max_wait != cur_wait);
+
+ if (cur_wait > lock_wait_threshold)
+ pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
+ }
+}
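
The cmpxchg loop above maintains a lock-free running maximum: it retries until either the stored maximum already covers the sampled wait time or the sample is successfully installed. A minimal sketch of the same pattern in C11 atomics (helper name hypothetical, not part of the patch):

	#include <stdatomic.h>

	/* Lock-free running maximum: retry until the stored value is
	 * already >= sample or our compare-and-swap installs sample. */
	static void record_max(_Atomic long long *max, long long sample)
	{
		long long cur = atomic_load(max);

		while (sample > cur) {
			/* On failure, cur is refreshed with the current value. */
			if (atomic_compare_exchange_weak(max, &cur, sample))
				break;
		}
	}
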
+
+static void test_unlock(bool master, bool verbose)
+{
+ if (lock_rwlock_ptr && master) {
+ if (lock_read)
+ read_unlock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_unlock((rwlock_t *)lock_rwlock_ptr);
+ if (verbose)
+ pr_notice("unlock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ }
+
+ if (lock_spinlock_ptr && master) {
+ spin_unlock((spinlock_t *)lock_spinlock_ptr);
+ if (verbose)
+ pr_notice("unlock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ }
+
+ if (lock_rcu)
+ rcu_read_unlock();
+
+ if (disable_preempt)
+ preempt_enable();
+
+ if (disable_softirq)
+ local_bh_enable();
+
+ if (test_disable_irq)
+ local_irq_enable();
+
+ if (lock_mmap_sem && master) {
+ if (lock_read)
+ up_read(&main_task->mm->mmap_sem);
+ else
+ up_write(&main_task->mm->mmap_sem);
+ if (verbose)
+ pr_notice("unlock mmap_sem pid=%d\n", main_task->pid);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (lock_read)
+ up_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ up_write((struct rw_semaphore *)lock_rwsem_ptr);
+ if (verbose)
+ pr_notice("unlock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ }
+
+ if (lock_mutex_ptr && master) {
+ mutex_unlock((struct mutex *)lock_mutex_ptr);
+ if (verbose)
+ pr_notice("unlock mutex %ps\n",
+ (void *)lock_mutex_ptr);
+ }
+}
+
+static void test_alloc_pages(struct list_head *pages)
+{
+ struct page *page;
+ unsigned int i;
+
+ for (i = 0; i < alloc_pages_nr; i++) {
+ page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
+ if (!page) {
+ atomic_inc(&alloc_pages_failed);
+ break;
+ }
+ list_add(&page->lru, pages);
+ }
+}
+
+static void test_free_pages(struct list_head *pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, pages, lru)
+ __free_pages(page, alloc_pages_order);
+ INIT_LIST_HEAD(pages);
+}
+
+static void test_wait(unsigned int secs, unsigned int nsecs)
+{
+ if (wait_state == TASK_RUNNING) {
+ if (secs)
+ mdelay(secs * MSEC_PER_SEC);
+ if (nsecs)
+ ndelay(nsecs);
+ return;
+ }
+
+ __set_current_state(wait_state);
+ if (use_hrtimer) {
+ ktime_t time;
+
+ time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
+ schedule_hrtimeout(&time, HRTIMER_MODE_REL);
+ } else {
+ schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
+ }
+}
+
+static void test_lockup(bool master)
+{
+ u64 lockup_start = local_clock();
+ unsigned int iter = 0;
+ LIST_HEAD(pages);
+
+ pr_notice("Start on CPU%d\n", raw_smp_processor_id());
+
+ test_lock(master, true);
+
+ test_alloc_pages(&pages);
+
+ while (iter++ < iterations && !signal_pending(main_task)) {
+
+ if (iowait)
+ current->in_iowait = 1;
+
+ test_wait(time_secs, time_nsecs);
+
+ if (iowait)
+ current->in_iowait = 0;
+
+ if (reallocate_pages)
+ test_free_pages(&pages);
+
+ if (reacquire_locks)
+ test_unlock(master, false);
+
+ if (touch_softlockup)
+ touch_softlockup_watchdog();
+
+ if (touch_hardlockup)
+ touch_nmi_watchdog();
+
+ if (call_cond_resched)
+ cond_resched();
+
+ test_wait(cooldown_secs, cooldown_nsecs);
+
+ if (reacquire_locks)
+ test_lock(master, false);
+
+ if (reallocate_pages)
+ test_alloc_pages(&pages);
+ }
+
+ pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
+ local_clock() - lockup_start);
+
+ test_free_pages(&pages);
+
+ test_unlock(master, true);
+}
+
+DEFINE_PER_CPU(struct work_struct, test_works);
+
+static void test_work_fn(struct work_struct *work)
+{
+ test_lockup(!lock_single ||
+ work == per_cpu_ptr(&test_works, master_cpu));
+}
+
+static bool test_kernel_ptr(unsigned long addr, int size)
+{
+ void *ptr = (void *)addr;
+ char buf;
+
+ if (!addr)
+ return false;
+
+	/* should be at least a readable kernel address */
+ if (access_ok(ptr, 1) ||
+ access_ok(ptr + size - 1, 1) ||
+ probe_kernel_address(ptr, buf) ||
+ probe_kernel_address(ptr + size - 1, buf)) {
+ pr_err("invalid kernel ptr: %#lx\n", addr);
+ return true;
+ }
+
+ return false;
+}
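
test_kernel_ptr() inverts the usual access_ok() check: access_ok() validates user-space ranges, so an address that passes it cannot be a kernel pointer, and probe_kernel_address() then confirms that the first and last bytes are mapped and readable. A condensed sketch of the same idea for a single byte (helper name hypothetical):

	/* True if ptr plausibly points at readable kernel memory:
	 * access_ok() succeeding would mean a user-space address, and
	 * probe_kernel_address() test-reads one byte without faulting. */
	static bool readable_kernel_ptr(const void *ptr)
	{
		char byte;

		return ptr && !access_ok(ptr, 1) &&
		       !probe_kernel_address(ptr, byte);
	}
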
+
+static bool __maybe_unused test_magic(unsigned long addr, int offset,
+ unsigned int expected)
+{
+ void *ptr = (void *)addr + offset;
+ unsigned int magic = 0;
+
+ if (!addr)
+ return false;
+
+ if (probe_kernel_address(ptr, magic) || magic != expected) {
+ pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
+ addr, offset, magic, expected);
+ return true;
+ }
+
+ return false;
+}
+
+static int __init test_lockup_init(void)
+{
+ u64 test_start = local_clock();
+
+ main_task = current;
+
+ switch (state[0]) {
+ case 'S':
+ wait_state = TASK_INTERRUPTIBLE;
+ break;
+ case 'D':
+ wait_state = TASK_UNINTERRUPTIBLE;
+ break;
+ case 'K':
+ wait_state = TASK_KILLABLE;
+ break;
+ case 'R':
+ wait_state = TASK_RUNNING;
+ break;
+ default:
+ pr_err("unknown state=%s\n", state);
+ return -EINVAL;
+ }
+
+ if (alloc_pages_atomic)
+ alloc_pages_gfp = GFP_ATOMIC;
+
+ if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
+ test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
+ test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
+ test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
+ return -EINVAL;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, magic),
+ RWLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, wait_lock.rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
+ (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
+ (test_disable_irq || disable_softirq || disable_preempt ||
+ lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
+ pr_err("refuse to sleep in atomic context\n");
+ return -EINVAL;
+ }
+
+ if (lock_mmap_sem && !main_task->mm) {
+ pr_err("no mm to lock mmap_sem\n");
+ return -EINVAL;
+ }
+
+ if (test_file_path[0]) {
+ test_file = filp_open(test_file_path, O_RDONLY, 0);
+ if (IS_ERR(test_file)) {
+			pr_err("failed to open file_path\n");
+ return -EINVAL;
+ }
+ test_inode = file_inode(test_file);
+ } else if (test_lock_inode ||
+ test_lock_mapping ||
+ test_lock_sb_umount) {
+ pr_err("no file to lock\n");
+ return -EINVAL;
+ }
+
+ if (test_lock_inode && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;
+
+ if (test_lock_mapping && test_file && test_file->f_mapping)
+ lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;
+
+ if (test_lock_sb_umount && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
+
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ main_task->pid, time_secs, time_nsecs,
+ cooldown_secs, cooldown_nsecs, iterations, state,
+ all_cpus ? "all_cpus " : "",
+ iowait ? "iowait " : "",
+ test_disable_irq ? "disable_irq " : "",
+ disable_softirq ? "disable_softirq " : "",
+ disable_preempt ? "disable_preempt " : "",
+ lock_rcu ? "lock_rcu " : "",
+ lock_read ? "lock_read " : "",
+ touch_softlockup ? "touch_softlockup " : "",
+ touch_hardlockup ? "touch_hardlockup " : "",
+ call_cond_resched ? "call_cond_resched " : "",
+ reacquire_locks ? "reacquire_locks " : "");
+
+ if (alloc_pages_nr)
+ pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
+ alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
+ reallocate_pages ? "reallocate_pages " : "");
+
+ if (all_cpus) {
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ preempt_disable();
+ master_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&test_works, cpu));
+ }
+ preempt_enable();
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&test_works, cpu));
+
+ cpus_read_unlock();
+ } else {
+ test_lockup(true);
+ }
+
+ if (measure_lock_wait)
+ pr_notice("Maximum lock wait: %lld ns\n",
+ atomic64_read(&max_lock_wait));
+
+ if (alloc_pages_nr)
+ pr_notice("Page allocation failed %u times\n",
+ atomic_read(&alloc_pages_failed));
+
+ pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
+
+ if (test_file)
+ fput(test_file);
+
+ if (signal_pending(main_task))
+ return -EINTR;
+
+ return -EAGAIN;
+}
+module_init(test_lockup_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
+MODULE_DESCRIPTION("Test module to generate lockups");
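
Every knob is a module parameter, so a lockup scenario is described entirely on the modprobe command line; test_lockup_init() runs the whole test and deliberately returns -EAGAIN (or -EINTR) so the module never stays loaded. An illustrative invocation (values hypothetical): busy-loop in state R with preemption disabled on every CPU long enough to trip the soft-lockup detector:

	modprobe test_lockup time_secs=30 state=R disable_preempt=1 all_cpus=1
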
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
new file mode 100644
index 000000000000..d19c8080fd4d
--- /dev/null
+++ b/lib/test_min_heap.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "min_heap_test: " fmt
+
+/*
+ * Test cases for the generic min heap, exercised with both min and max ordering.
+ */
+
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
+static __init bool less_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs < *(int *)rhs;
+}
+
+static __init bool greater_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs > *(int *)rhs;
+}
+
+static __init void swap_ints(void *lhs, void *rhs)
+{
+ int temp = *(int *)lhs;
+
+ *(int *)lhs = *(int *)rhs;
+ *(int *)rhs = temp;
+}
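
The int helpers above are one instantiation of the struct min_heap_callbacks triple (elem_size, less, swp), which is what lets the heap hold arbitrary fixed-size elements. A hedged sketch of callbacks for a richer element type (struct event and its fields are hypothetical):

	/* Hypothetical element type, ordered by deadline. */
	struct event {
		u64 deadline;
		int id;
	};

	static bool event_less(const void *lhs, const void *rhs)
	{
		return ((const struct event *)lhs)->deadline <
		       ((const struct event *)rhs)->deadline;
	}

	static void event_swap(void *lhs, void *rhs)
	{
		struct event tmp = *(struct event *)lhs;

		*(struct event *)lhs = *(struct event *)rhs;
		*(struct event *)rhs = tmp;
	}

	static const struct min_heap_callbacks event_funcs = {
		.elem_size = sizeof(struct event),
		.less = event_less,
		.swp = event_swap,
	};
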
+
+static __init int pop_verify_heap(bool min_heap,
+ struct min_heap *heap,
+ const struct min_heap_callbacks *funcs)
+{
+ int *values = heap->data;
+ int err = 0;
+ int last;
+
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ while (heap->nr > 0) {
+ if (min_heap) {
+ if (last > values[0]) {
+ pr_err("error: expected %d <= %d\n", last,
+ values[0]);
+ err++;
+ }
+ } else {
+ if (last < values[0]) {
+ pr_err("error: expected %d >= %d\n", last,
+ values[0]);
+ err++;
+ }
+ }
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ }
+ return err;
+}
+
+static __init int test_heapify_all(bool min_heap)
+{
+ int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
+ -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
+ struct min_heap heap = {
+ .data = values,
+ .nr = ARRAY_SIZE(values),
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, err;
+
+ /* Test with known set of values. */
+ min_heapify_all(&heap, &funcs);
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ heap.nr = ARRAY_SIZE(values);
+ for (i = 0; i < heap.nr; i++)
+ values[i] = get_random_int();
+
+ min_heapify_all(&heap, &funcs);
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ while (heap.nr < heap.size) {
+ temp = get_random_int();
+ min_heap_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_pop_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Fill values with data to pop and replace. */
+ temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_pop_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ heap.nr = 0;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with randomly generated values. */
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ temp = get_random_int();
+ min_heap_pop_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static int __init test_min_heap_init(void)
+{
+ int err = 0;
+
+ err += test_heapify_all(true);
+ err += test_heapify_all(false);
+ err += test_heap_push(true);
+ err += test_heap_push(false);
+ err += test_heap_pop_push(true);
+ err += test_heap_pop_push(false);
+ if (err) {
+ pr_err("test failed with %d errors\n", err);
+ return -EINVAL;
+ }
+ pr_info("test passed\n");
+ return 0;
+}
+module_init(test_min_heap_init);
+
+static void __exit test_min_heap_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_min_heap_exit);
+
+MODULE_LICENSE("GPL");
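
Because pop_verify_heap() consumes the heap in sorted order, the same API doubles as a sorting primitive. A sketch (not part of the patch) that drains a min heap into an ascending array:

	/* Drain a min heap into out[] in ascending order. */
	static void drain_sorted(int *out, struct min_heap *heap,
				 const struct min_heap_callbacks *funcs)
	{
		int i = 0;

		while (heap->nr > 0) {
			out[i++] = ((int *)heap->data)[0]; /* current minimum */
			min_heap_pop(heap, funcs);
		}
	}
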
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index 2d7d257a430e..f93b1e145ada 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -92,8 +92,9 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
* @init_level: what kind of initialization is performed
+ * @xfail: is this test expected to fail?
*/
-#define DEFINE_TEST_DRIVER(name, var_type, which) \
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Returns 0 on success, 1 on failure. */ \
static noinline __init int test_ ## name (void) \
{ \
@@ -139,13 +140,14 @@ static noinline __init int test_ ## name (void) \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
- if (sum == 0) \
+ if (sum == 0) { \
pr_info(#name " ok\n"); \
- else \
- pr_warn(#name " FAIL (uninit bytes: %d)\n", \
- sum); \
- \
- return (sum != 0); \
+ return 0; \
+ } else { \
+ pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \
+ (xfail) ? "X" : "", sum); \
+ return (xfail) ? 0 : 1; \
+ } \
}
#define DEFINE_TEST(name, var_type, which, init_level) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
@@ -189,7 +191,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
-DEFINE_TEST_DRIVER(name, var_type, which)
+DEFINE_TEST_DRIVER(name, var_type, which, 0)
/* Structure with no padding. */
struct test_packed {
@@ -326,8 +328,14 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
return __leaf_switch_none(2, fill);
}
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang has a way to initialize variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://bugs.llvm.org/show_bug.cgi?id=44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1);
static int __init test_stackinit_init(void)
{
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 55c14e8c8859..d4f97925dbd8 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -12,6 +12,9 @@
static unsigned int tests_run;
static unsigned int tests_passed;
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
@@ -959,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -1081,6 +1098,7 @@ static noinline void check_find(struct xarray *xa)
for (i = 2; i < 10; i++)
check_multi_find_1(xa, i);
check_multi_find_2(xa);
+ check_multi_find_3(xa);
}
/* See find_swap_entry() in mm/shmem.c */
@@ -1138,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
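
check_pause() verifies that xas_pause() leaves the iteration state safe to resume, which is the property that lets long walks drop the RCU read lock between entries. A typical pattern built on that guarantee (a sketch, assuming xa points to a populated struct xarray):

	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (need_resched()) {
			/* Remember the position, leave RCU, resume later. */
			xas_pause(&xas);
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
		/* process entry */
	}
	rcu_read_unlock();
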
+
static noinline void check_move_tiny(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
@@ -1646,6 +1700,7 @@ static int xarray_checks(void)
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
+ check_pause(&array);
check_account(&array);
check_destroy(&array);
check_move(&array);
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index b352903c50e3..277cb4417ac2 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -52,7 +52,7 @@ struct ts_bm
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
- unsigned int good_shift[0];
+ unsigned int good_shift[];
};
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
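
This hunk and the two that follow replace the zero-length-array GNU extension with a C99 flexible array member; allocation is unchanged, but the declaration is now standard and lets compilers and static checkers bound the trailing array. A sketch of the idiom (struct and variable names hypothetical):

	struct pattern {
		unsigned int len;
		unsigned int tbl[];	/* C99 flexible array member */
	};

	size_t n = 16;	/* example trailing element count */
	/* One allocation covers the header plus n trailing elements;
	 * struct_size() guards the size arithmetic against overflow. */
	struct pattern *p = kmalloc(struct_size(p, tbl, n), GFP_KERNEL);
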
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 9c873cadab7c..ab749ec10ab5 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -32,7 +32,7 @@
struct ts_fsm
{
unsigned int ntokens;
- struct ts_fsm_token tokens[0];
+ struct ts_fsm_token tokens[];
};
/* other values derived from ctype.h */
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 94617e014b3a..c77a3d537f24 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -36,7 +36,7 @@ struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
- unsigned int prefix_tbl[0];
+ unsigned int prefix_tbl[];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 7b9b58aee72c..f8c0ccf35f29 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -45,13 +45,6 @@ static bool was_reported(struct source_location *location)
return test_and_set_bit(REPORTED_BIT, &location->reported);
}
-static void print_source_location(const char *prefix,
- struct source_location *loc)
-{
- pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
- loc->line & LINE_MASK, loc->column & COLUMN_MASK);
-}
-
static bool suppress_report(struct source_location *loc)
{
return current->in_ubsan || was_reported(loc);
@@ -140,13 +133,14 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static void ubsan_prologue(struct source_location *location)
+static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
pr_err("========================================"
"========================================\n");
- print_source_location("UBSAN: Undefined behaviour in", location);
+ pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
+ loc->line & LINE_MASK, loc->column & COLUMN_MASK);
}
static void ubsan_epilogue(void)
@@ -156,6 +150,17 @@ static void ubsan_epilogue(void)
"========================================\n");
current->in_ubsan--;
+
+ if (panic_on_warn) {
+ /*
+ * This thread may hit another WARN() in the panic path.
+ * Resetting this prevents additional WARN() from panicking the
+ * system on this thread. Other threads are blocked by the
+ * panic_mutex in panic().
+ */
+ panic_on_warn = 0;
+ panic("panic_on_warn set ...\n");
+ }
}
static void handle_overflow(struct overflow_data *data, void *lhs,
@@ -169,12 +174,12 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s integer overflow:\n",
- type_is_signed(type) ? "signed" : "unsigned");
pr_err("%s %c %s cannot be represented in type %s\n",
lhs_val_str,
op,
@@ -214,7 +219,7 @@ void __ubsan_handle_negate_overflow(struct overflow_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "negation-overflow");
val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
@@ -234,7 +239,7 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "division-overflow");
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -253,7 +258,7 @@ static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "null-ptr-deref");
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
@@ -268,7 +273,7 @@ static void handle_misaligned_access(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "misaligned-access");
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
@@ -284,7 +289,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "object-size-mismatch");
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
@@ -343,7 +348,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "array-index-out-of-bounds");
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
@@ -364,7 +369,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
if (suppress_report(&data->location))
goto out;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "shift-out-of-bounds");
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -396,7 +401,7 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "unreachable");
pr_err("calling __builtin_unreachable()\n");
ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
@@ -411,7 +416,7 @@ void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "invalid-load");
val_to_string(val_str, sizeof(val_str), data->type, val);
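
With the reason string threaded through ubsan_prologue(), the class of undefined behaviour now appears in the banner line itself, e.g. (file and line illustrative):

	UBSAN: array-index-out-of-bounds in lib/test_ubsan.c:75:3

which makes reports grep-able by class without parsing the body of the splat.
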
diff --git a/lib/uuid.c b/lib/uuid.c
index b6a1edb61d87..562d53977cab 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -40,6 +40,16 @@ void generate_random_uuid(unsigned char uuid[16])
}
EXPORT_SYMBOL(generate_random_uuid);
+void generate_random_guid(unsigned char guid[16])
+{
+ get_random_bytes(guid, 16);
+ /* Set GUID version to 4 --- truly random generation */
+ guid[7] = (guid[7] & 0x0F) | 0x40;
+ /* Set the GUID variant to DCE */
+ guid[8] = (guid[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_guid);
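
generate_random_guid() mirrors generate_random_uuid() but accounts for the little-endian guid_t layout: the 16-bit time_hi_and_version field occupies bytes 6-7 and is stored byte-swapped, so the version nibble lands in byte 7 instead of byte 6, while the variant byte at offset 8 is unaffected by endianness. A sketch extracting the version from each form:

	/* The version nibble sits in different bytes for the two layouts. */
	static int uuid_version(const unsigned char u[16]) { return u[6] >> 4; }
	static int guid_version(const unsigned char g[16]) { return g[7] >> 4; }
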
+
static void __uuid_gen_common(__u8 b[16])
{
prandom_bytes(b, 16);
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index f8b8ec5e63ac..a2909af4b924 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -2,30 +2,9 @@
/*
* Generic userspace implementations of gettimeofday() and similar.
*/
-#include <linux/compiler.h>
-#include <linux/math64.h>
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-/*
- * The generic vDSO implementation requires that gettimeofday.h
- * provides:
- * - __arch_get_vdso_data(): to get the vdso datapage.
- * - __arch_get_hw_counter(): to get the hw counter based on the
- * clock_mode.
- * - gettimeofday_fallback(): fallback for gettimeofday.
- * - clock_gettime_fallback(): fallback for clock_gettime.
- * - clock_getres_fallback(): fallback for clock_getres.
- */
-#ifdef ENABLE_COMPAT_VDSO
-#include <asm/vdso/compat_gettimeofday.h>
-#else
-#include <asm/vdso/gettimeofday.h>
-#endif /* ENABLE_COMPAT_VDSO */
-
#ifndef vdso_calc_delta
/*
* Default implementation which works for all sane clocksources. That
@@ -38,6 +17,27 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
}
#endif
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
+#endif
+
+#ifndef __arch_vdso_hres_capable
+static inline bool __arch_vdso_hres_capable(void)
+{
+ return true;
+}
+#endif
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
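
The #ifndef guards above are the generic vDSO's override hook: an architecture that needs different behaviour defines the symbol as a macro of the same name in its asm/vdso/gettimeofday.h before this file is included, and the weak default drops out. A sketch of such an override (hypothetical architecture header):

	/* In a hypothetical arch's asm/vdso/gettimeofday.h: */
	static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
	{
		/* This architecture never hands the vDSO an unusable clock. */
		return true;
	}
	#define vdso_clocksource_ok vdso_clocksource_ok
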
+
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
@@ -57,14 +57,15 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
do {
seq = vdso_read_begin(vd);
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -101,12 +102,16 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
u64 cycles, last, sec, ns;
u32 seq;
+	/* Allows the high resolution parts to be compiled out */
+ if (!__arch_vdso_hres_capable())
+ return -1;
+
do {
/*
- * Open coded to handle VCLOCK_TIMENS. Time namespace
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
* enabled tasks have a special VVAR page installed which
* has vd->seq set to 1 and vd->clock_mode set to
- * VCLOCK_TIMENS. For non time namespace affected tasks
+ * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
* this does not affect performance because if vd->seq is
	 * odd, i.e. a concurrent update is in progress, the extra
* check for vd->clock_mode is just a few extra
@@ -115,20 +120,20 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_hres_timens(vd, clk, ts);
cpu_relax();
}
smp_rmb();
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -187,12 +192,12 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VCLOCK_TIMENS. See comment in
+		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
* do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_coarse_timens(vd, clk, ts);
cpu_relax();
}
@@ -206,9 +211,9 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
@@ -233,23 +238,31 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- int ret = __cvdso_clock_gettime_common(clock, ts);
+ int ret = __cvdso_clock_gettime_common(vd, clock, ts);
if (unlikely(ret))
return clock_gettime_fallback(clock, ts);
return 0;
}
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+}
+
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_gettime_common(clock, &ts);
+ ret = __cvdso_clock_gettime_common(vd, clock, &ts);
if (unlikely(ret))
return clock_gettime32_fallback(clock, res);
@@ -260,12 +273,18 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
-__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+__cvdso_gettimeofday_data(const struct vdso_data *vd,
+ struct __kernel_old_timeval *tv, struct timezone *tz)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
@@ -279,7 +298,7 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
@@ -289,13 +308,20 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
return 0;
}
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+}
+
#ifdef VDSO_HAS_TIME
-static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+static __maybe_unused __kernel_old_time_t
+__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
__kernel_old_time_t t;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -305,13 +331,18 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
return t;
}
+
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+{
+ return __cvdso_time_data(__arch_get_vdso_data(), time);
+}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
u64 ns;
@@ -319,7 +350,8 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
if (unlikely((u32) clock >= MAX_CLOCKS))
return -1;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
/*
@@ -349,23 +381,31 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
}
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- int ret = __cvdso_clock_getres_common(clock, res);
+ int ret = __cvdso_clock_getres_common(vd, clock, res);
if (unlikely(ret))
return clock_getres_fallback(clock, res);
return 0;
}
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+}
+
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_getres_common(clock, &ts);
+ ret = __cvdso_clock_getres_common(vd, clock, &ts);
if (unlikely(ret))
return clock_getres32_fallback(clock, res);
@@ -376,5 +416,12 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
}
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ clock, res);
+}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */
diff --git a/lib/xarray.c b/lib/xarray.c
index 1d9fab7db8da..e9e641d3c0c3 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
xas->xa_node = XAS_RESTART;
if (node) {
- unsigned int offset = xas->xa_offset;
+ unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1836,10 +1838,11 @@ static bool xas_sibling(struct xa_state *xas)
struct xa_node *node = xas->xa_node;
unsigned long mask;
- if (!node)
+ if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
return false;
mask = (XA_CHUNK_SIZE << node->shift) - 1;
- return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
}
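
The cast matters because xa_offset is a narrow type: without it the left operand is promoted to int, and node->shift can reach well past 31 for high-order entries, making the shift undefined and the comparison wrong on 64-bit. A minimal illustration:

	unsigned char off = 1;
	unsigned long bad  = off << 40;                /* UB: shifts an int */
	unsigned long good = (unsigned long)off << 40; /* 0x10000000000UL */
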
/**