author    Petr Mladek <pmladek@suse.com>    2021-08-30 14:56:06 +0200
committer Petr Mladek <pmladek@suse.com>    2021-08-30 14:56:06 +0200
commit    71af75b6929458d85f63c0649dc26d6f4c19729e (patch)
tree      c05c57903424d8270f6b6f3ec3493791fdba4e5c /lib
parent    fe8e3ee0d588566c1f44f28a555042ef50eba491 (diff)
parent    bc17bed5fd73ef1a9aed39f3b0ea26936dad60b8 (diff)
Merge branch 'for-5.15-printk-index' into for-linus
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 14
-rw-r--r--  lib/Kconfig.debug | 62
-rw-r--r--  lib/Kconfig.kasan | 16
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/asn1_encoder.c | 2
-rw-r--r--  lib/bitmap.c | 14
-rw-r--r--  lib/bootconfig.c | 76
-rw-r--r--  lib/buildid.c | 74
-rw-r--r--  lib/cmdline_kunit.c | 2
-rw-r--r--  lib/decompress_bunzip2.c | 6
-rw-r--r--  lib/decompress_unlz4.c | 8
-rw-r--r--  lib/decompress_unlzo.c | 3
-rw-r--r--  lib/decompress_unxz.c | 2
-rw-r--r--  lib/decompress_unzstd.c | 4
-rw-r--r--  lib/devres.c | 6
-rw-r--r--  lib/dump_stack.c | 29
-rw-r--r--  lib/dynamic_debug.c | 8
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 2
-rw-r--r--  lib/iov_iter.c | 1230
-rw-r--r--  lib/kfifo.c | 2
-rw-r--r--  lib/kstrtox.c | 5
-rw-r--r--  lib/kunit/debugfs.c | 2
-rw-r--r--  lib/kunit/executor.c | 53
-rw-r--r--  lib/kunit/executor_test.c | 133
-rw-r--r--  lib/kunit/kunit-example-test.c | 31
-rw-r--r--  lib/kunit/kunit-test.c | 42
-rw-r--r--  lib/kunit/string-stream.h | 6
-rw-r--r--  lib/kunit/test.c | 95
-rw-r--r--  lib/list_sort.c | 2
-rw-r--r--  lib/logic_iomem.c | 318
-rw-r--r--  lib/lz4/lz4_decompress.c | 2
-rw-r--r--  lib/math/Makefile | 1
-rw-r--r--  lib/math/rational-test.c | 56
-rw-r--r--  lib/math/rational.c | 16
-rw-r--r--  lib/mpi/longlong.h | 4
-rw-r--r--  lib/mpi/mpicoder.c | 6
-rw-r--r--  lib/mpi/mpiutil.c | 2
-rw-r--r--  lib/nlattr.c | 4
-rw-r--r--  lib/oid_registry.c | 2
-rw-r--r--  lib/parser.c | 1
-rw-r--r--  lib/pldmfw/pldmfw.c | 2
-rw-r--r--  lib/reed_solomon/test_rslib.c | 2
-rw-r--r--  lib/refcount.c | 2
-rw-r--r--  lib/rhashtable.c | 2
-rw-r--r--  lib/sbitmap.c | 2
-rw-r--r--  lib/scatterlist.c | 4
-rw-r--r--  lib/seq_buf.c | 10
-rw-r--r--  lib/slub_kunit.c | 152
-rw-r--r--  lib/sort.c | 2
-rw-r--r--  lib/stackdepot.c | 2
-rw-r--r--  lib/string.c | 2
-rw-r--r--  lib/string_helpers.c | 106
-rw-r--r--  lib/test-string_helpers.c | 171
-rw-r--r--  lib/test_bitmap.c | 7
-rw-r--r--  lib/test_bitops.c | 2
-rw-r--r--  lib/test_bpf.c | 2
-rw-r--r--  lib/test_firmware.c | 10
-rw-r--r--  lib/test_hmm.c | 132
-rw-r--r--  lib/test_hmm_uapi.h | 2
-rw-r--r--  lib/test_kasan.c | 25
-rw-r--r--  lib/test_kmod.c | 6
-rw-r--r--  lib/test_list_sort.c | 129
-rw-r--r--  lib/test_scanf.c | 2
-rw-r--r--  lib/test_string.c | 5
-rw-r--r--  lib/vsprintf.c | 13
-rw-r--r--  lib/xz/xz_dec_bcj.c | 2
-rw-r--r--  lib/xz/xz_dec_lzma2.c | 8
-rw-r--r--  lib/zlib_inflate/inffast.c | 2
-rw-r--r--  lib/zstd/huf.h | 2
69 files changed, 2122 insertions, 1030 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index ac3b30697b2b..d241fe476fda 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -102,6 +102,20 @@ config INDIRECT_PIO
When in doubt, say N.
+config INDIRECT_IOMEM
+ bool
+ help
+ This is selected by other options/architectures to provide the
+ emulated iomem accessors.
+
+config INDIRECT_IOMEM_FALLBACK
+ bool
+ depends on INDIRECT_IOMEM
+ help
+ If INDIRECT_IOMEM is selected, this enables falling back to plain
+ mmio accesses when the IO memory address is not a registered
+ emulated region.
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a0e1910d0a16..831212722924 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -35,6 +35,17 @@ config PRINTK_CALLER
no option to enable/disable at the kernel command line parameter or
sysfs interface.
+config STACKTRACE_BUILD_ID
+ bool "Show build ID information in stacktraces"
+ depends on PRINTK
+ help
+ Selecting this option adds build ID information for symbols in
+ stacktraces printed with the printk format '%p[SR]b'.
+
+ This option is intended for distros where debuginfo is not easily
+ accessible but can be downloaded given the build ID of the vmlinux or
+ kernel module where the function is located.
+
config CONSOLE_LOGLEVEL_DEFAULT
int "Default console loglevel (1-15)"
range 1 15
@@ -400,8 +411,8 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
-config DEBUG_FORCE_FUNCTION_ALIGN_32B
- bool "Force all function address 32B aligned" if EXPERT
+config DEBUG_FORCE_FUNCTION_ALIGN_64B
+ bool "Force all function address 64B aligned" if EXPERT
help
There are cases that a commit from one domain changes the function
address alignment of other domains, and cause magic performance
@@ -1279,7 +1290,7 @@ config PROVE_RAW_LOCK_NESTING
option expect lockdep splats until these problems have been fully
addressed which is work in progress. This config switch allows to
identify and analyze these problems. It will be removed and the
- check permanentely enabled once the main issues have been fixed.
+ check permanently enabled once the main issues have been fixed.
If unsure, select N.
@@ -1445,7 +1456,7 @@ config DEBUG_LOCKING_API_SELFTESTS
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
are detected by debugging mechanisms or not. (if you disable
- lock debugging then those bugs wont be detected of course.)
+ lock debugging then those bugs won't be detected of course.)
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
@@ -1925,7 +1936,7 @@ config FAIL_IO_TIMEOUT
thus exercising the error handling.
Only works with drivers that use the generic timeout handling,
- for others it wont do anything.
+ for others it won't do anything.
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
@@ -2046,8 +2057,9 @@ config LKDTM
Documentation/fault-injection/provoke-crashes.rst
config TEST_LIST_SORT
- tristate "Linked list sorting test"
- depends on DEBUG_KERNEL || m
+ tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this to turn on 'list_sort()' function test. This test is
executed only once during system boot (so affects only boot time),
@@ -2431,6 +2443,30 @@ config BITS_TEST
If unsure, say N.
+config SLUB_KUNIT_TEST
+ tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+ depends on SLUB_DEBUG && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds SLUB allocator unit test.
+ Tests SLUB cache debugging functionality.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config RATIONAL_KUNIT_TEST
+ tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select RATIONAL
+ default KUNIT_ALL_TESTS
+ help
+ This builds the rational math unit test.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2573,6 +2609,18 @@ config TEST_FPU
If unsure, say N.
+config TEST_CLOCKSOURCE_WATCHDOG
+ tristate "Test clocksource watchdog in kernel space"
+ depends on CLOCKSOURCE_WATCHDOG
+ help
+ Enable this option to create a kernel module that will trigger
+ a test of the clocksource watchdog. This module may be loaded
+ via modprobe or insmod in which case it will run upon being
+ loaded, or it may be built in, in which case it will run
+ shortly after boot.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
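
The TEST_LIST_SORT conversion above, and the new SLUB_KUNIT_TEST and RATIONAL_KUNIT_TEST entries, all follow the same KUnit pattern: the prompt is hidden behind `if !KUNIT_ALL_TESTS` and the option defaults to KUNIT_ALL_TESTS. A minimal sketch of the boilerplate such a test carries — the names here are illustrative, not the ones in lib/test_list_sort.c:

    #include <kunit/test.h>

    /* Illustrative KUnit case: KUNIT_CASE() wires a function into a
     * suite, and kunit_test_suite() registers it with the runner that
     * a KUNIT_ALL_TESTS build executes at boot. */
    static void example_sort_test(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, 2 + 2, 4);
    }

    static struct kunit_case example_cases[] = {
            KUNIT_CASE(example_sort_test),
            {}
    };

    static struct kunit_suite example_suite = {
            .name = "example-sort",
            .test_cases = example_cases,
    };
    kunit_test_suite(example_suite);
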
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index cffc2ebbf185..1e2d10f86011 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -12,6 +12,13 @@ config HAVE_ARCH_KASAN_HW_TAGS
config HAVE_ARCH_KASAN_VMALLOC
bool
+config ARCH_DISABLE_KASAN_INLINE
+ bool
+ help
+ An architecture might not support inline instrumentation.
+ When this option is selected, inline and stack instrumentation are
+ disabled.
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -130,6 +137,7 @@ config KASAN_OUTLINE
config KASAN_INLINE
bool "Inline instrumentation"
+ depends on !ARCH_DISABLE_KASAN_INLINE
help
Compiler directly inserts code checking shadow memory before
memory accesses. This is faster than outline (in some workloads
@@ -141,6 +149,7 @@ endchoice
config KASAN_STACK
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
+ depends on !ARCH_DISABLE_KASAN_INLINE
default y if CC_IS_GCC
help
The LLVM stack address sanitizer has a know problem that
@@ -154,10 +163,13 @@ config KASAN_STACK
but clang users can still enable it for builds without
CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
to use and enabled by default.
+ If the architecture disables inline instrumentation, stack
+ instrumentation is also disabled as it adds inline-style
+ instrumentation that is run unconditionally.
-config KASAN_SW_TAGS_IDENTIFY
+config KASAN_TAGS_IDENTIFY
bool "Enable memory corruption identification"
- depends on KASAN_SW_TAGS
+ depends on KASAN_SW_TAGS || KASAN_HW_TAGS
help
This option enables best-effort identification of bug type
(use-after-free or out-of-bounds) at the cost of increased
diff --git a/lib/Makefile b/lib/Makefile
index a93f08038a25..5efd1b435a37 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -148,6 +148,8 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
lib-y += logic_pio.o
+lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
@@ -355,5 +357,6 @@ obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
index 41e71aae3ef6..27bbe891714f 100644
--- a/lib/asn1_encoder.c
+++ b/lib/asn1_encoder.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(asn1_encode_oid);
/**
* asn1_encode_length() - encode a length to follow an ASN.1 tag
* @data: pointer to encode at
- * @data_len: pointer to remaning length (adjusted by routine)
+ * @data_len: pointer to remaining length (adjusted by routine)
* @len: length to encode
*
* This routine can encode lengths up to 65535 using the ASN.1 rules.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 74ceb02f45e3..9401d39e4722 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -581,6 +581,14 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
{
unsigned int lastbit = r->nbits - 1;
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
str = bitmap_getnum(str, &r->start, lastbit);
if (IS_ERR(str))
return str;
@@ -595,6 +603,7 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
if (IS_ERR(str))
return str;
+check_pattern:
if (end_of_region(*str))
goto no_pattern;
@@ -784,8 +793,6 @@ int bitmap_parse(const char *start, unsigned int buflen,
}
EXPORT_SYMBOL(bitmap_parse);
-
-#ifdef CONFIG_NUMA
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
@@ -894,6 +901,7 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
}
}
+EXPORT_SYMBOL(bitmap_remap);
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
@@ -931,7 +939,9 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
else
return bitmap_ord_to_pos(new, n % w, bits);
}
+EXPORT_SYMBOL(bitmap_bitremap);
+#ifdef CONFIG_NUMA
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
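
Two changes in bitmap.c are worth calling out: bitmap_parse_region() now accepts the literal "all" for a region (so a string like "all:1/2" selects every other bit across the whole bitmap), and bitmap_remap()/bitmap_bitremap() move out from under #ifdef CONFIG_NUMA and gain EXPORT_SYMBOL, so modules can call them. A minimal sketch of bitmap_remap() as a module might now use it — assumed usage, not taken from the patch:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    /* Illustrative only: translate the set bits of @src through the
     * positional map defined by @old -> @new. */
    static void remap_example(void)
    {
            DECLARE_BITMAP(src, 8);
            DECLARE_BITMAP(dst, 8);
            DECLARE_BITMAP(old, 8);
            DECLARE_BITMAP(new, 8);

            bitmap_zero(src, 8);
            bitmap_zero(dst, 8);
            bitmap_zero(old, 8);
            bitmap_zero(new, 8);

            set_bit(1, src);        /* the bit to translate */
            set_bit(1, old);        /* ordinal 0 of @old ... */
            set_bit(5, new);        /* ... maps to bit 5 via @new */

            bitmap_remap(dst, src, old, new, 8);    /* dst now has bit 5 set */
    }
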
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 9f8c70a98fcf..927017431fb6 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -156,7 +156,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
struct xbc_node *node;
if (parent)
- node = xbc_node_get_child(parent);
+ node = xbc_node_get_subkey(parent);
else
node = xbc_root_node();
@@ -164,7 +164,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
if (!xbc_node_match_prefix(node, &key))
node = xbc_node_get_next(node);
else if (*key != '\0')
- node = xbc_node_get_child(node);
+ node = xbc_node_get_subkey(node);
else
break;
}
@@ -274,6 +274,8 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
struct xbc_node *node)
{
+ struct xbc_node *next;
+
if (unlikely(!xbc_data))
return NULL;
@@ -282,6 +284,13 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
if (!node)
node = xbc_nodes;
} else {
+ /* Leaf node may have a subkey */
+ next = xbc_node_get_subkey(node);
+ if (next) {
+ node = next;
+ goto found;
+ }
+
if (node == root) /* @root was a leaf, no child node. */
return NULL;
@@ -296,6 +305,7 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
node = xbc_node_get_next(node);
}
+found:
while (node && !xbc_node_is_leaf(node))
node = xbc_node_get_child(node);
@@ -367,18 +377,28 @@ static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
return node;
}
-static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
+{
+ while (node->child)
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+static struct xbc_node * __init __xbc_add_sibling(char *data, u32 flag, bool head)
{
struct xbc_node *sib, *node = xbc_add_node(data, flag);
if (node) {
if (!last_parent) {
+ /* Ignore @head in this case */
node->parent = XBC_NODE_MAX;
sib = xbc_last_sibling(xbc_nodes);
sib->next = xbc_node_index(node);
} else {
node->parent = xbc_node_index(last_parent);
- if (!last_parent->child) {
+ if (!last_parent->child || head) {
+ node->next = last_parent->child;
last_parent->child = xbc_node_index(node);
} else {
sib = xbc_node_get_child(last_parent);
@@ -392,6 +412,16 @@ static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
return node;
}
+static inline struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, false);
+}
+
+static inline struct xbc_node * __init xbc_add_head_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, true);
+}
+
static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
{
struct xbc_node *node = xbc_add_sibling(data, flag);
@@ -517,17 +547,20 @@ static int __init xbc_parse_array(char **__v)
char *next;
int c = 0;
+ if (last_parent->child)
+ last_parent = xbc_node_get_child(last_parent);
+
do {
c = __xbc_parse_value(__v, &next);
if (c < 0)
return c;
- node = xbc_add_sibling(*__v, XBC_VALUE);
+ node = xbc_add_child(*__v, XBC_VALUE);
if (!node)
return -ENOMEM;
*__v = next;
} while (c == ',');
- node->next = 0;
+ node->child = 0;
return c;
}
@@ -557,8 +590,9 @@ static int __init __xbc_add_key(char *k)
node = find_match_node(xbc_nodes, k);
else {
child = xbc_node_get_child(last_parent);
+ /* Since the value node is the first child, skip it. */
if (child && xbc_node_is_value(child))
- return xbc_parse_error("Subkey is mixed with value", k);
+ child = xbc_node_get_next(child);
node = find_match_node(child, k);
}
@@ -601,23 +635,29 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (ret)
return ret;
- child = xbc_node_get_child(last_parent);
- if (child) {
- if (xbc_node_is_key(child))
- return xbc_parse_error("Value is mixed with subkey", v);
- else if (op == '=')
- return xbc_parse_error("Value is redefined", v);
- }
-
c = __xbc_parse_value(&v, &next);
if (c < 0)
return c;
- if (op == ':' && child) {
- xbc_init_node(child, v, XBC_VALUE);
- } else if (!xbc_add_sibling(v, XBC_VALUE))
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child)) {
+ if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ if (op == ':') {
+ unsigned short nidx = child->next;
+
+ xbc_init_node(child, v, XBC_VALUE);
+ child->next = nidx; /* keep subkeys */
+ goto array;
+ }
+ /* op must be '+' */
+ last_parent = xbc_last_child(child);
+ }
+ /* The value node should always be the first child */
+ if (!xbc_add_head_sibling(v, XBC_VALUE))
return -ENOMEM;
+array:
if (c == ',') { /* Array */
c = xbc_parse_array(&next);
if (c < 0)
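
The net effect of this rework is that a key may now carry both a value and subkeys: the value node is kept as the first child, array elements become a child chain rather than a sibling chain, and the old "Subkey is mixed with value" / "Value is mixed with subkey" errors are gone. An illustrative bootconfig fragment that parses after this change:

    feature = 1             # the value node stays the first child
    feature.param = "x"     # a subkey may now coexist with the value
    feature += 2            # '+=' appends; feature becomes the array 1, 2
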
diff --git a/lib/buildid.c b/lib/buildid.c
index 6156997c3895..dfc62625cae4 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -1,36 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/buildid.h>
+#include <linux/cache.h>
#include <linux/elf.h>
+#include <linux/kernel.h>
#include <linux/pagemap.h>
#define BUILD_ID 3
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static inline int parse_build_id(void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- void *note_start,
- Elf32_Word note_size)
+static int parse_build_id_buf(unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
{
Elf32_Word note_offs = 0, new_offs;
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
-
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
-
while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
if (nhdr->n_type == BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
+ !strcmp((char *)(nhdr + 1), "GNU") &&
nhdr->n_descsz > 0 &&
nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
memcpy(build_id,
@@ -49,11 +44,29 @@ static inline int parse_build_id(void *page_addr,
break;
note_offs = new_offs;
}
+
return -EINVAL;
}
+static inline int parse_build_id(const void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ return parse_build_id_buf(build_id, size, note_start, note_size);
+}
+
/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(void *page_addr, unsigned char *build_id,
+static int get_build_id_32(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
@@ -78,7 +91,7 @@ static int get_build_id_32(void *page_addr, unsigned char *build_id,
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(void *page_addr, unsigned char *build_id,
+static int get_build_id_64(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
@@ -108,7 +121,7 @@ static int get_build_id_64(void *page_addr, unsigned char *build_id,
* @build_id: buffer to store build id, at least BUILD_ID_SIZE long
* @size: returns actual build id size in case of success
*
- * Returns 0 on success, otherwise error (< 0).
+ * Return: 0 on success, -EINVAL otherwise
*/
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size)
@@ -147,3 +160,32 @@ out:
put_page(page);
return ret;
}
+
+/**
+ * build_id_parse_buf - Get build ID from a buffer
+ * @buf: Elf note section(s) to parse
+ * @buf_size: Size of @buf in bytes
+ * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
+{
+ return parse_build_id_buf(build_id, NULL, buf, buf_size);
+}
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
+
+/**
+ * init_vmlinux_build_id - Compute and stash the running kernel's build ID
+ */
+void __init init_vmlinux_build_id(void)
+{
+ extern const void __start_notes __weak;
+ extern const void __stop_notes __weak;
+ unsigned int size = &__stop_notes - &__start_notes;
+
+ build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
+}
+#endif
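
The note-walking loop is now factored into parse_build_id_buf(), which both build_id_parse_buf() and the new init_vmlinux_build_id() reuse on the kernel's own __start_notes..__stop_notes range. A rough userspace sketch of the same ELF note layout walk — simplified and assumed, with only basic bounds checks:

    #include <elf.h>
    #include <stddef.h>
    #include <string.h>

    /* Illustrative analogue of parse_build_id_buf(): each ELF note is an
     * Elf32_Nhdr followed by a 4-byte-aligned name and descriptor; the
     * build ID is the descriptor of the "GNU" note of type
     * NT_GNU_BUILD_ID (3). */
    static int find_build_id(const char *notes, size_t size,
                             const unsigned char **id, size_t *id_len)
    {
            size_t off = 0;

            while (off + sizeof(Elf32_Nhdr) < size) {
                    const Elf32_Nhdr *nhdr = (const Elf32_Nhdr *)(notes + off);
                    size_t next = off + sizeof(*nhdr) +
                                  ((nhdr->n_namesz + 3) & ~3u) +
                                  ((nhdr->n_descsz + 3) & ~3u);

                    if (next > size)
                            break;
                    if (nhdr->n_type == NT_GNU_BUILD_ID &&
                        nhdr->n_namesz == sizeof("GNU") &&
                        !memcmp(nhdr + 1, "GNU", sizeof("GNU"))) {
                            *id = (const unsigned char *)(nhdr + 1) +
                                  ((nhdr->n_namesz + 3) & ~3u);
                            *id_len = nhdr->n_descsz;
                            return 0;
                    }
                    off = next;
            }
            return -1;
    }
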
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index 018bfc8113c4..a72a2c16066e 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);
p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, (int *)0, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
}
static void cmdline_test_range(struct kunit *test)
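
A one-line cleanup, but it relies on KUnit's pointer-expectation macros now accepting a plain NULL instead of a typed null pointer such as (int *)0. A hedged sketch of the resulting idiom:

    static void null_check_example(struct kunit *test)
    {
            int *p = NULL;

            /* Before the macro rework this needed the typed (int *)0. */
            KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "p should be unset");
    }
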
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index c72c865032fa..3518e7394eca 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -80,7 +80,7 @@
/* This is what we know about each Huffman coding group */
struct group_data {
- /* We have an extra slot at the end of limit[] for a sentinal value. */
+ /* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
@@ -337,7 +337,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
- limit[maxLen+1] = INT_MAX; /* Sentinal value for
+ limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
@@ -385,7 +385,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
- };
+ }
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index c0cfcfd486be..e6327391b6b6 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -112,6 +112,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("data corrupted");
goto exit_2;
}
+ } else if (size < 4) {
+ /* empty or end-of-file */
+ goto exit_3;
}
chunksize = get_unaligned_le32(inp);
@@ -125,6 +128,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
continue;
}
+ if (!fill && chunksize == 0) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
if (posp)
*posp += 4;
@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
}
}
+exit_3:
ret = 0;
exit_2:
if (!input)
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 1f439a622076..64c1358500ce 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -43,7 +43,6 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
int l;
u8 *parse = input;
u8 *end = input + in_len;
- u8 level = 0;
u16 version;
/*
@@ -65,7 +64,7 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
- level = *parse++;
+ parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 25d59a95bd66..a2f38e23004a 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -23,7 +23,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .xz file in case of a compresed kernel is as follows.
+ * The structure of the .xz file in case of a compressed kernel is as follows.
* Sizes (as bytes) of the fields are in parenthesis.
*
* Stream Header (12)
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 790abc472f5b..6b629ab31c1e 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -16,7 +16,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .zst file in case of a compresed kernel is as follows.
+ * The structure of the .zst file in case of a compressed kernel is as follows.
* Maximum sizes (as bytes) of the fields are in parenthesis.
*
* Frame Header: (18)
@@ -56,7 +56,7 @@
/*
* Preboot environments #include "path/to/decompress_unzstd.c".
* All of the source files we depend on must be #included.
- * zstd's only source dependeny is xxhash, which has no source
+ * zstd's only source dependency is xxhash, which has no source
* dependencies.
*
* When UNZSTD_PREBOOT is defined we declare __decompress(), which is
diff --git a/lib/devres.c b/lib/devres.c
index 4679dbb1bf5f..b0e1c6702c71 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -157,8 +157,10 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
dev_name(dev), res->name);
else
pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!pretty_name)
+ if (!pretty_name) {
+ dev_err(dev, "can't generate pretty name for resource %pR\n", res);
return IOMEM_ERR_PTR(-ENOMEM);
+ }
if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
@@ -353,7 +355,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* detach.
*
* This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
+ * be safely called without context and guaranteed to succeed once
* allocated.
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
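
__devm_ioremap_resource() sits under the usual probe-time helper, and with this change its -ENOMEM path reports a reason instead of failing silently. A minimal sketch of the call site a platform driver typically has — foo_probe is hypothetical:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* the -ENOMEM case now logs why */

            return 0;
    }
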
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5ebf4375fa8c..cd3387bb34e5 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/buildid.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -36,6 +37,14 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
va_end(args);
}
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+#define BUILD_ID_FMT " %20phN"
+#define BUILD_ID_VAL vmlinux_build_id
+#else
+#define BUILD_ID_FMT "%s"
+#define BUILD_ID_VAL ""
+#endif
+
/**
* dump_stack_print_info - print generic debug info for dump_stack()
* @log_lvl: log level
@@ -45,13 +54,13 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(), current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version, BUILD_ID_VAL);
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
@@ -73,10 +82,10 @@ void show_regs_print_info(const char *log_lvl)
dump_stack_print_info(log_lvl);
}
-static void __dump_stack(void)
+static void __dump_stack(const char *log_lvl)
{
- dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL, KERN_DEFAULT);
+ dump_stack_print_info(log_lvl);
+ show_stack(NULL, NULL, log_lvl);
}
/**
@@ -84,7 +93,7 @@ static void __dump_stack(void)
*
* Architectures can override this implementation by implementing its own.
*/
-asmlinkage __visible void dump_stack(void)
+asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
unsigned long flags;
@@ -93,7 +102,13 @@ asmlinkage __visible void dump_stack(void)
* against other CPUs
*/
printk_cpu_lock_irqsave(flags);
- __dump_stack();
+ __dump_stack(log_lvl);
printk_cpu_unlock_irqrestore(flags);
}
+EXPORT_SYMBOL(dump_stack_lvl);
+
+asmlinkage __visible void dump_stack(void)
+{
+ dump_stack_lvl(KERN_DEFAULT);
+}
EXPORT_SYMBOL(dump_stack);
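
With dump_stack_lvl() exported, callers can pick the loglevel of a backtrace, which the old dump_stack() (always KERN_DEFAULT) could not. A minimal sketch, assuming a caller that only wants warning-level output:

    #include <linux/printk.h>

    static void report_oddity(void)
    {
            pr_warn("unexpected state, dumping stack\n");
            dump_stack_lvl(KERN_WARNING);
    }
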
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 641767b0dce2..cb5abb42c16a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -991,7 +991,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val,
ddebug_exec_queries((val ? val : "+p"), modname);
- return 0; /* query failure shouldnt stop module load */
+ return 0; /* query failure shouldn't stop module load */
}
/* handle both dyndbg and $module.dyndbg params at boot */
@@ -1117,9 +1117,9 @@ static int __init dynamic_debug_init(void)
goto out_err;
ddebug_init_success = 1;
- vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in __dyndbg section\n",
- modct, entries, (int)(modct * sizeof(struct ddebug_table)),
- (int)(entries * sizeof(struct _ddebug)));
+ vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n",
+ entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),
+ (int)((entries * sizeof(struct _ddebug)) >> 10));
/* apply ddebug_query boot param, dont unload tables on err */
if (ddebug_setup_string[0] != '\0') {
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b1678ed0017c..ae98ca17982e 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -3,7 +3,7 @@
/* */
/* Font file generated by cpi2fnt */
/* ------------------------------ */
-/* Combined with the alpha-numeric */
+/* Combined with the alphanumeric */
/* portion of Greg Harp's old PEARL */
/* font (from earlier versions of */
/* linux-m86k) by John Shifflett */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c701b7a187f2..e23123ae3a13 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -16,170 +16,137 @@
#define PIPE_PARANOIA /* for now */
-#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
- size_t left; \
- size_t wanted = n; \
- __p = i->iov; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } else { \
- left = 0; \
- } \
- while (unlikely(!left && n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted - n; \
-}
-
-#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
- size_t wanted = n; \
- __p = i->kvec; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- (void)(STEP); \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } \
- while (unlikely(n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- (void)(STEP); \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted; \
-}
-
-#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
- struct bvec_iter __start; \
- __start.bi_size = n; \
- __start.bi_bvec_done = skip; \
- __start.bi_idx = 0; \
- for_each_bvec(__v, i->bvec, __bi, __start) { \
- (void)(STEP); \
- } \
-}
-
-#define iterate_xarray(i, n, __v, skip, STEP) { \
+/* covers iovec and kvec alike */
+#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
+ size_t off = 0; \
+ size_t skip = i->iov_offset; \
+ do { \
+ len = min(n, __p->iov_len - skip); \
+ if (likely(len)) { \
+ base = __p->iov_base + skip; \
+ len -= (STEP); \
+ off += len; \
+ skip += len; \
+ n -= len; \
+ if (skip < __p->iov_len) \
+ break; \
+ } \
+ __p++; \
+ skip = 0; \
+ } while (n); \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_bvec(i, n, base, len, off, p, STEP) { \
+ size_t off = 0; \
+ unsigned skip = i->iov_offset; \
+ while (n) { \
+ unsigned offset = p->bv_offset + skip; \
+ unsigned left; \
+ void *kaddr = kmap_local_page(p->bv_page + \
+ offset / PAGE_SIZE); \
+ base = kaddr + offset % PAGE_SIZE; \
+ len = min(min(n, (size_t)(p->bv_len - skip)), \
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ off += len; \
+ skip += len; \
+ if (skip == p->bv_len) { \
+ skip = 0; \
+ p++; \
+ } \
+ n -= len; \
+ if (left) \
+ break; \
+ } \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_xarray(i, n, base, len, __off, STEP) { \
+ __label__ __out; \
+ size_t __off = 0; \
struct page *head = NULL; \
- size_t wanted = n, seg, offset; \
- loff_t start = i->xarray_start + skip; \
- pgoff_t index = start >> PAGE_SHIFT; \
+ loff_t start = i->xarray_start + i->iov_offset; \
+ unsigned offset = start % PAGE_SIZE; \
+ pgoff_t index = start / PAGE_SIZE; \
int j; \
\
XA_STATE(xas, i->xarray, index); \
\
- rcu_read_lock(); \
- xas_for_each(&xas, head, ULONG_MAX) { \
- if (xas_retry(&xas, head)) \
- continue; \
- if (WARN_ON(xa_is_value(head))) \
- break; \
- if (WARN_ON(PageHuge(head))) \
- break; \
+ rcu_read_lock(); \
+ xas_for_each(&xas, head, ULONG_MAX) { \
+ unsigned left; \
+ if (xas_retry(&xas, head)) \
+ continue; \
+ if (WARN_ON(xa_is_value(head))) \
+ break; \
+ if (WARN_ON(PageHuge(head))) \
+ break; \
for (j = (head->index < index) ? index - head->index : 0; \
- j < thp_nr_pages(head); j++) { \
- __v.bv_page = head + j; \
- offset = (i->xarray_start + skip) & ~PAGE_MASK; \
- seg = PAGE_SIZE - offset; \
- __v.bv_offset = offset; \
- __v.bv_len = min(n, seg); \
- (void)(STEP); \
- n -= __v.bv_len; \
- skip += __v.bv_len; \
- if (n == 0) \
- break; \
- } \
- if (n == 0) \
- break; \
- } \
- rcu_read_unlock(); \
- n = wanted - n; \
-}
-
-#define iterate_all_kinds(i, n, v, I, B, K, X) { \
- if (likely(n)) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)); \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
+ j < thp_nr_pages(head); j++) { \
+ void *kaddr = kmap_local_page(head + j); \
+ base = kaddr + offset; \
+ len = PAGE_SIZE - offset; \
+ len = min(n, len); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ __off += len; \
+ n -= len; \
+ if (left || n == 0) \
+ goto __out; \
+ offset = 0; \
} \
} \
+__out: \
+ rcu_read_unlock(); \
+ i->iov_offset += __off; \
+ n = __off; \
}
-#define iterate_and_advance(i, n, v, I, B, K, X) { \
+#define __iterate_and_advance(i, n, base, len, off, I, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
- if (i->count) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
+ if (likely(n)) { \
+ if (likely(iter_is_iovec(i))) { \
+ const struct iovec *iov = i->iov; \
+ void __user *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ iov, (I)) \
+ i->nr_segs -= iov - i->iov; \
+ i->iov = iov; \
+ } else if (iov_iter_is_bvec(i)) { \
const struct bio_vec *bvec = i->bvec; \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
- i->nr_segs -= i->bvec - bvec; \
- skip = __bi.bi_bvec_done; \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- if (skip == kvec->iov_len) { \
- kvec++; \
- skip = 0; \
- } \
+ void *base; \
+ size_t len; \
+ iterate_bvec(i, n, base, len, off, \
+ bvec, (K)) \
+ i->nr_segs -= bvec - i->bvec; \
+ i->bvec = bvec; \
+ } else if (iov_iter_is_kvec(i)) { \
+ const struct kvec *kvec = i->kvec; \
+ void *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ kvec, (K)) \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- skip += n; \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)) \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- if (skip == iov->iov_len) { \
- iov++; \
- skip = 0; \
- } \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
+ } else if (iov_iter_is_xarray(i)) { \
+ void *base; \
+ size_t len; \
+ iterate_xarray(i, n, base, len, off, \
+ (K)) \
} \
i->count -= n; \
- i->iov_offset = skip; \
} \
}
+#define iterate_and_advance(i, n, base, len, off, I, K) \
+ __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
@@ -469,19 +436,25 @@ out:
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
*/
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
{
- size_t skip = i->iov_offset;
- const struct iovec *iov;
- int err;
- struct iovec v;
+ if (iter_is_iovec(i)) {
+ const struct iovec *p;
+ size_t skip;
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- iterate_iovec(i, bytes, v, iov, skip, ({
- err = fault_in_pages_readable(v.iov_base, v.iov_len);
+ if (bytes > i->count)
+ bytes = i->count;
+ for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
+ size_t len = min(bytes, p->iov_len - skip);
+ int err;
+
+ if (unlikely(!len))
+ continue;
+ err = fault_in_pages_readable(p->iov_base + skip, len);
if (unlikely(err))
- return err;
- 0;}))
+ return err;
+ bytes -= len;
+ }
}
return 0;
}
@@ -492,19 +465,14 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- direction &= READ | WRITE;
-
- /* It will get better. Eventually... */
- if (uaccess_kernel()) {
- i->type = ITER_KVEC | direction;
- i->kvec = (struct kvec *)iov;
- } else {
- i->type = ITER_IOVEC | direction;
- i->iov = iov;
- }
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_IOVEC,
+ .data_source = direction,
+ .iov = iov,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_init);
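
iov_iter_init() now builds the whole struct at once, with iter_type and data_source split out of the old packed i->type, and the uaccess_kernel() special case is gone. A small sketch of the usual single-segment ITER_IOVEC setup — assumed usage, not from the patch:

    #include <linux/uio.h>

    /* Illustrative only: wrap a user buffer as a data source (WRITE
     * direction) and drain it into a kernel buffer. */
    static size_t copy_in_example(void __user *ubuf, size_t len, void *kbuf)
    {
            struct iovec iov = { .iov_base = ubuf, .iov_len = len };
            struct iov_iter iter;

            iov_iter_init(&iter, WRITE, &iov, 1, len);
            return copy_from_iter(kbuf, len, &iter);
    }
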
@@ -613,55 +581,45 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- struct csum_state *csstate,
- struct iov_iter *i)
+ struct iov_iter *i, __wsum *sump)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
- __wsum sum = csstate->csum;
- size_t off = csstate->off;
+ __wsum sum = *sump;
+ size_t off = 0;
unsigned int i_head;
- size_t n, r;
+ size_t r;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &r);
- if (unlikely(!n))
- return 0;
- do {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
- char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
- sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
- kunmap_atomic(p);
+ bytes = push_pipe(i, bytes, &i_head, &r);
+ while (bytes) {
+ size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = r + chunk;
- n -= chunk;
+ bytes -= chunk;
off += chunk;
- addr += chunk;
r = 0;
i_head++;
- } while (n);
- i->count -= bytes;
- csstate->csum = sum;
- csstate->off = off;
- return bytes;
+ }
+ *sump = sum;
+ i->count -= off;
+ return off;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyout(base, addr + off, len),
+ memcpy(base, addr + off, len)
)
return bytes;
@@ -678,19 +636,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n)
return n;
}
-static unsigned long copy_mc_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
-{
- unsigned long ret;
- char *to;
-
- to = kmap_atomic(page);
- ret = copy_mc_to_kernel(to + offset, from, len);
- kunmap_atomic(to);
-
- return ret;
-}
-
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -702,25 +647,23 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &off);
- if (unlikely(!n))
- return 0;
- do {
+ n = push_pipe(i, bytes, &i_head, &off);
+ while (n) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
unsigned long rem;
-
- rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
- off, addr, chunk);
+ rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
+ chunk -= rem;
+ kunmap_local(p);
i->head = i_head;
- i->iov_offset = off + chunk - rem;
- xfer += chunk - rem;
+ i->iov_offset = off + chunk;
+ xfer += chunk;
if (rem)
break;
n -= chunk;
- addr += chunk;
off = 0;
i_head++;
- } while (n);
+ }
i->count -= xfer;
return xfer;
}
@@ -750,46 +693,13 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
- unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
-
if (unlikely(iov_iter_is_pipe(i)))
return copy_mc_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
- - v.iov_len, v.iov_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- rcu_read_unlock();
- i->iov_offset += bytes;
- i->count -= bytes;
- return bytes;
- }
- })
+ __iterate_and_advance(i, bytes, base, len, off,
+ copyout_mc(base, addr + off, len),
+ copy_mc_to_kernel(base, addr + off, len)
)
return bytes;
@@ -799,70 +709,30 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
-
- if (iter_is_iovec(i))
- might_fault();
- iterate_all_kinds(i, bytes, v, ({
- if (copyin((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full);
-
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_inatomic_nocache(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
@@ -886,20 +756,13 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_flushcache(addr + off, base, len),
+ memcpy_flushcache(addr + off, base, len)
)
return bytes;
@@ -907,32 +770,6 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full_nocache);
-
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
@@ -957,22 +794,51 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
return false;
}
+static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ if (likely(iter_is_iovec(i)))
+ return copy_page_to_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
+ size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
+ kunmap_local(kaddr);
+ return wanted;
+ }
+ if (iov_iter_is_pipe(i))
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+ if (unlikely(iov_iter_is_discard(i))) {
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ i->count -= bytes;
+ return bytes;
+ }
+ WARN_ON(1);
+ return 0;
+}
+
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ size_t res = 0;
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else if (unlikely(iov_iter_is_discard(i)))
- return bytes;
- else if (likely(!iov_iter_is_pipe(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- else
- return copy_page_to_iter_pipe(page, offset, bytes, i);
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ size_t n = __copy_page_to_iter(page, offset,
+ min(bytes, (size_t)PAGE_SIZE - offset), i);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
@@ -981,17 +847,16 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return 0;
- }
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
+ if (likely(iter_is_iovec(i)))
+ return copy_page_from_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return wanted;
- } else
- return copy_page_from_iter_iovec(page, offset, bytes, i);
+ }
+ WARN_ON(1);
+ return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
@@ -1011,7 +876,9 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ memset(p + off, 0, chunk);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = off + chunk;
n -= chunk;
@@ -1026,19 +893,17 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
- iterate_and_advance(i, bytes, v,
- clear_user(v.iov_base, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, count,
+ clear_user(base, len),
+ memset(base, 0, len)
)
return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
+ struct iov_iter *i)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
if (unlikely(!page_copy_sane(page, offset, bytes))) {
@@ -1050,18 +915,14 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
WARN_ON(1);
return 0;
}
- iterate_all_kinds(i, bytes, v,
- copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(p + off, base, len),
+ memcpy(p + off, base, len)
)
kunmap_atomic(kaddr);
return bytes;
}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+EXPORT_SYMBOL(copy_page_from_iter_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
@@ -1092,8 +953,6 @@ static inline void pipe_truncate(struct iov_iter *i)
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- if (unlikely(i->count < size))
- size = i->count;
if (size) {
struct pipe_buffer *buf;
unsigned int p_mask = pipe->ring_size - 1;
@@ -1132,27 +991,42 @@ static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
i->iov_offset = bi.bi_bvec_done;
}
-void iov_iter_advance(struct iov_iter *i, size_t size)
+static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- pipe_advance(i, size);
- return;
- }
- if (unlikely(iov_iter_is_discard(i))) {
- i->count -= size;
+ const struct iovec *iov, *end;
+
+ if (!i->count)
return;
+ i->count -= size;
+
+ size += i->iov_offset; // from beginning of current segment
+ for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
+ if (likely(size < iov->iov_len))
+ break;
+ size -= iov->iov_len;
}
- if (unlikely(iov_iter_is_xarray(i))) {
- size = min(size, i->count);
+ i->iov_offset = size;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ /* iovec and kvec have identical layouts */
+ iov_iter_iovec_advance(i, size);
+ } else if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_pipe(i)) {
+ pipe_advance(i, size);
+ } else if (unlikely(iov_iter_is_xarray(i))) {
i->iov_offset += size;
i->count -= size;
- return;
- }
- if (iov_iter_is_bvec(i)) {
- iov_iter_bvec_advance(i, size);
- return;
+ } else if (iov_iter_is_discard(i)) {
+ i->count -= size;
}
- iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1234,16 +1108,13 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i)))
- return i->count; // it is a silly place, anyway
- if (i->nr_segs == 1)
- return i->count;
- if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
- return i->count;
- if (iov_iter_is_bvec(i))
- return min(i->count, i->bvec->bv_len - i->iov_offset);
- else
- return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (i->nr_segs > 1) {
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (iov_iter_is_bvec(i))
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ }
+ return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
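
The constructors below get the same struct-literal treatment as iov_iter_init() above. A small sketch of a kvec-backed iterator, which after this series shares the iterate_iovec() body with user iovecs — assumed usage:

    static size_t kvec_copy_example(void *dst, void *src, size_t len)
    {
            struct kvec kv = { .iov_base = src, .iov_len = len };
            struct iov_iter iter;

            iov_iter_kvec(&iter, WRITE, &kv, 1, len);
            return copy_from_iter(dst, len, &iter);   /* memcpy() under the hood */
    }
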
@@ -1252,11 +1123,14 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_KVEC | (direction & (READ | WRITE));
- i->kvec = kvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_KVEC,
+ .data_source = direction,
+ .kvec = kvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_kvec);
@@ -1265,11 +1139,14 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_BVEC | (direction & (READ | WRITE));
- i->bvec = bvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_BVEC,
+ .data_source = direction,
+ .bvec = bvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_bvec);
@@ -1279,12 +1156,15 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
{
BUG_ON(direction != READ);
WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
- i->type = ITER_PIPE | READ;
- i->pipe = pipe;
- i->head = pipe->head;
- i->iov_offset = 0;
- i->count = count;
- i->start_head = i->head;
+ *i = (struct iov_iter){
+ .iter_type = ITER_PIPE,
+ .data_source = false,
+ .pipe = pipe,
+ .head = pipe->head,
+ .start_head = pipe->head,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_pipe);
@@ -1305,11 +1185,14 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
struct xarray *xarray, loff_t start, size_t count)
{
BUG_ON(direction & ~1);
- i->type = ITER_XARRAY | (direction & (READ | WRITE));
- i->xarray = xarray;
- i->xarray_start = start;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_XARRAY,
+ .data_source = direction,
+ .xarray = xarray,
+ .xarray_start = start,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_xarray);
@@ -1325,56 +1208,103 @@ EXPORT_SYMBOL(iov_iter_xarray);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
- i->type = ITER_DISCARD | READ;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter){
+ .iter_type = ITER_DISCARD,
+ .data_source = false,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_discard);
-unsigned long iov_iter_alignment(const struct iov_iter *i)
+static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->iov[k].iov_len - skip;
+ if (len) {
+ res |= (unsigned long)i->iov[k].iov_base + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ }
+ return res;
+}
- if (unlikely(iov_iter_is_pipe(i))) {
+static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
+{
+ unsigned res = 0;
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+ res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ return res;
+}
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_alignment_iovec(i);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_alignment_bvec(i);
+
+ if (iov_iter_is_pipe(i)) {
unsigned int p_mask = i->pipe->ring_size - 1;
+ size_t size = i->count;
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
}
- if (unlikely(iov_iter_is_xarray(i)))
+
+ if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
- iterate_all_kinds(i, size, v,
- (res |= (unsigned long)v.iov_base | v.iov_len, 0),
- res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len,
- res |= v.bv_offset | v.bv_len
- )
- return res;
+
+ return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
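
Why the OR trick works: ORing every segment's base address and effective
length into res means the lowest set bit of the result bounds the alignment
of the whole iterator. For a single segment at address 0x1003 of length
0x200, res = 0x1003 | 0x200 = 0x1203, whose low bits show the data is only
byte-aligned. Direct-I/O style callers typically test this against a
block-size mask, e.g. (sketch) iov_iter_alignment(iter) & (blocksize - 1),
and reject or fall back when the result is non-zero.
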
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
+ unsigned long v = 0;
size_t size = i->count;
+ unsigned k;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
- }
- iterate_all_kinds(i, size, v,
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0), 0),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0))
- );
+ for (k = 0; k < i->nr_segs; k++) {
+ if (i->iov[k].iov_len) {
+ unsigned long base = (unsigned long)i->iov[k].iov_base;
+ if (v) // if not the first one
+ res |= base | v; // this start | previous end
+ v = base + i->iov[k].iov_len;
+ if (size <= i->iov[k].iov_len)
+ break;
+ size -= i->iov[k].iov_len;
+ }
+ }
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
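
The rewritten loop ORs each segment's start address with the previous
segment's end (the "this start | previous end" step), so any bit set in the
result marks a gap or misalignment between consecutive segments. For user
segments [0x1000, 0x1200) followed by [0x2000, 0x2100), res accumulates
0x2000 | 0x1200 = 0x3200; a driver with a virt-boundary restriction can
mask the result against its boundary to decide whether the request must be
bounced. The new WARN_ON(!iter_is_iovec(i)) makes explicit that this helper
is only meaningful for user-backed (ITER_IOVEC) requests.
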
@@ -1409,9 +1339,6 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
unsigned int iter_head, npages;
size_t capacity;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1492,29 +1419,67 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
return actual;
}
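+/* must only be called on a non-empty ITER_IOVEC iterator */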
+static unsigned long first_iovec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ size_t skip;
+ long k;
+
+ for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
+ unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
+ size_t len = i->iov[k].iov_len - skip;
+
+ if (unlikely(!len))
+ continue;
+ if (len > maxsize)
+ len = maxsize;
+ len += (*start = addr % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return addr & PAGE_MASK;
+ }
+ BUG(); // if it had been empty, we wouldn't get called
+}
+
+/* must only be called on a non-empty ITER_BVEC iterator */
+static struct page *first_bvec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ struct page *page;
+ size_t skip = i->iov_offset, len;
+
+ len = i->bvec->bv_len - skip;
+ if (len > maxsize)
+ len = maxsize;
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ len += (*start = skip % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return page;
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
+ size_t len;
+ int n, res;
+
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
n = DIV_ROUND_UP(len, PAGE_SIZE);
res = get_user_pages_fast(addr, n,
iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
@@ -1522,17 +1487,21 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- get_page(*pages = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }),
- 0
- )
- return 0;
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, maxpages);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ while (n--)
+ get_page(*pages++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
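
The calling convention itself is unchanged; as a reference sketch
(illustrative names):

    struct page *pages[8];
    size_t start;
    ssize_t bytes;

    /* pin up to 8 pages' worth of the iterator's data */
    bytes = iov_iter_get_pages(i, pages, 8 * PAGE_SIZE, 8, &start);
    if (bytes < 0)
        return bytes;
    /* 'bytes' of data begin at offset 'start' within pages[0]; the
     * iterator is not advanced, and each returned page holds a
     * reference the caller must drop with put_page() */
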
@@ -1549,9 +1518,6 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
unsigned int iter_head, npages;
ssize_t n;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1624,24 +1590,18 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
+ size_t len;
+ int n, res;
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
@@ -1654,61 +1614,42 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- *pages = p = get_pages_array(1);
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, ~0U);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ *pages = p = get_pages_array(n);
if (!p)
return -ENOMEM;
- get_page(*p = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }), 0
- )
- return 0;
+ while (n--)
+ get_page(*p++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
- char *to = addr;
__wsum sum, next;
- size_t off = 0;
sum = *csum;
if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
+ iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_from_user(base, addr + off, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(addr + off, base, len, sum, off);
})
)
*csum = sum;
@@ -1716,104 +1657,30 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (!next)
- return false;
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- })
- )
- *csum = sum;
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
struct csum_state *csstate = _csstate;
- const char *from = addr;
__wsum sum, next;
- size_t off;
-
- if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = csstate->csum;
- off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy(v.iov_base,
- (from += v.iov_len) - v.iov_len,
- v.iov_len, sum, off);
- off += v.iov_len;
+
+ sum = csum_shift(csstate->csum, csstate->off);
+ if (unlikely(iov_iter_is_pipe(i)))
+ bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
+ else iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_to_user(addr + off, base, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(base, addr + off, len, sum, off);
})
)
- csstate->csum = sum;
- csstate->off = off;
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
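
The new csstate->off handling leans on csum_shift() from
include/net/checksum.h: a 16-bit ones'-complement sum is insensitive to
position except for byte parity, so shifting the data by an odd offset is
equivalent to byte-swapping the folded sum. At the time of this series the
helper is essentially:

    static inline __wsum csum_shift(__wsum sum, int offset)
    {
        /* rotating by 8 swaps the two bytes of the partial sum,
         * matching a one-byte shift of the underlying data */
        if (offset & 1)
            return (__force __wsum)ror32((__force u32)sum, 8);
        return sum;
    }

So the function checksums the copied data as if it started at offset zero,
then shifts the result back into place and advances csstate->off by the
bytes actually copied.
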
@@ -1837,19 +1704,56 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
+static int iov_npages(const struct iov_iter *i, int maxpages)
{
- size_t size = i->count;
+ size_t skip = i->iov_offset, size = i->count;
+ const struct iovec *p;
int npages = 0;
- if (!size)
- return 0;
- if (unlikely(iov_iter_is_discard(i)))
- return 0;
+ for (p = i->iov; size; skip = 0, p++) {
+ unsigned offs = offset_in_page(p->iov_base + skip);
+ size_t len = min(p->iov_len - skip, size);
- if (unlikely(iov_iter_is_pipe(i))) {
- struct pipe_inode_info *pipe = i->pipe;
+ if (len) {
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ }
+ return npages;
+}
+
+static int bvec_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct bio_vec *p;
+ int npages = 0;
+
+ for (p = i->bvec; size; skip = 0, p++) {
+ unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
+ size_t len = min(p->bv_len - skip, size);
+
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ return npages;
+}
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ if (unlikely(!i->count))
+ return 0;
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_npages(i, maxpages);
+ if (iov_iter_is_bvec(i))
+ return bvec_npages(i, maxpages);
+ if (iov_iter_is_pipe(i)) {
unsigned int iter_head;
+ int npages;
size_t off;
if (!sanity(i))
@@ -1857,44 +1761,15 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
data_start(i, &iter_head, &off);
/* some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
- if (npages >= maxpages)
- return maxpages;
- } else if (unlikely(iov_iter_is_xarray(i))) {
- unsigned offset;
-
- offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
-
- npages = 1;
- if (size > PAGE_SIZE - offset) {
- size -= PAGE_SIZE - offset;
- npages += size >> PAGE_SHIFT;
- size &= ~PAGE_MASK;
- if (size)
- npages++;
- }
- if (npages >= maxpages)
- return maxpages;
- } else iterate_all_kinds(i, size, v, ({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- 0;}),({
- npages++;
- if (npages >= maxpages)
- return maxpages;
- }),({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- }),
- 0
- )
- return npages;
+ npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
+ return min(npages, maxpages);
+ }
+ if (iov_iter_is_xarray(i)) {
+ unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
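
Both new helpers use the same page-count formula: a segment that starts
offs bytes into a page and carries len bytes spans
DIV_ROUND_UP(offs + len, PAGE_SIZE) pages. With 4 KiB pages, for example, a
6000-byte segment whose start falls 3000 bytes into a page needs
DIV_ROUND_UP(9000, 4096) = 3 pages. Clamping to maxpages as soon as the
running total exceeds it lets the walk stop early on long iterators.
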
@@ -2093,30 +1968,3 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
-
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context)
-{
- struct kvec w;
- int err = -EINVAL;
- if (!bytes)
- return 0;
-
- iterate_all_kinds(i, bytes, v, -EINVAL, ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;}), ({
- w = v;
- err = f(&w, context);}), ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;})
- )
- return err;
-}
-EXPORT_SYMBOL(iov_iter_for_each_range);
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 70dab9ac7827..12f5a347aa13 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -415,7 +415,7 @@ static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
)
/*
- * __kfifo_poke_n internal helper function for storeing the length of
+ * __kfifo_poke_n internal helper function for storing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 0b5fe8b41173..059b8b00dc53 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -14,11 +14,12 @@
*/
#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/math64.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+
#include "kstrtox.h"
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 9214c493d8b7..b71db0abc12b 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -64,7 +64,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
debugfs_print_result(seq, suite, test_case);
seq_printf(seq, "%s %d - %s\n",
- kunit_status_to_string(success), 1, suite->name);
+ kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 15832ed44668..acd1de436f59 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/reboot.h>
#include <kunit/test.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
@@ -13,13 +14,17 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
-static char *filter_glob;
-module_param(filter_glob, charp, 0);
+static char *filter_glob_param;
+module_param_named(filter_glob, filter_glob_param, charp, 0);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites run at boot-time, e.g. list*");
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
static struct kunit_suite * const *
-kunit_filter_subsuite(struct kunit_suite * const * const subsuite)
+kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
+ const char *filter_glob)
{
int i, n = 0;
struct kunit_suite **filtered;
@@ -52,19 +57,14 @@ struct suite_set {
struct kunit_suite * const * const *end;
};
-static struct suite_set kunit_filter_suites(void)
+static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
+ const char *filter_glob)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
struct suite_set filtered;
- const size_t max = __kunit_suites_end - __kunit_suites_start;
-
- if (!filter_glob) {
- filtered.start = __kunit_suites_start;
- filtered.end = __kunit_suites_end;
- return filtered;
- }
+ const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
filtered.start = copy;
@@ -74,7 +74,7 @@ static struct suite_set kunit_filter_suites(void)
}
for (i = 0; i < max; ++i) {
- filtered_subsuite = kunit_filter_subsuite(__kunit_suites_start[i]);
+ filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], filter_glob);
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
@@ -82,6 +82,20 @@ static struct suite_set kunit_filter_suites(void)
return filtered;
}
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
+}
+
static void kunit_print_tap_header(struct suite_set *suite_set)
{
struct kunit_suite * const * const *suites, * const *subsuite;
@@ -98,21 +112,32 @@ static void kunit_print_tap_header(struct suite_set *suite_set)
int kunit_run_all_tests(void)
{
struct kunit_suite * const * const *suites;
+ struct suite_set suite_set = {
+ .start = __kunit_suites_start,
+ .end = __kunit_suites_end,
+ };
- struct suite_set suite_set = kunit_filter_suites();
+ if (filter_glob_param)
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
kunit_print_tap_header(&suite_set);
for (suites = suite_set.start; suites < suite_set.end; suites++)
__kunit_test_suites_init(*suites);
- if (filter_glob) { /* a copy was made of each array */
+ if (filter_glob_param) { /* a copy was made of each array */
for (suites = suite_set.start; suites < suite_set.end; suites++)
kfree(*suites);
kfree(suite_set.start);
}
+ kunit_handle_shutdown();
+
return 0;
}
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+#include "executor_test.c"
+#endif
+
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
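
Two notes on the executor changes: kunit_shutdown is a core_param, so a
test kernel (e.g. under UML or QEMU) booted with kunit_shutdown=poweroff,
=halt or =reboot exits cleanly once all tests have run, which is what lets
tooling collect results non-interactively. And executor_test.c is
#include'd into executor.c rather than built as a separate object so the
tests can reach static functions such as kunit_filter_subsuite() and
kunit_filter_suites().
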
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
new file mode 100644
index 000000000000..cdbe54b16501
--- /dev/null
+++ b/lib/kunit/executor_test.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the KUnit executor.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/test.h>
+
+static void kfree_at_end(struct kunit *test, const void *to_free);
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name);
+
+static void filter_subsuite_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ /* Want: suite1, suite2, NULL -> suite2, NULL */
+ filtered = kunit_filter_subsuite(subsuite, "suite2*");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
+ kfree_at_end(test, filtered);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
+
+ KUNIT_EXPECT_FALSE(test, filtered[1]);
+}
+
+static void filter_subsuite_to_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ filtered = kunit_filter_subsuite(subsuite, "not_found");
+ kfree_at_end(test, filtered); /* just in case */
+
+ KUNIT_EXPECT_FALSE_MSG(test, filtered,
+ "should be NULL to indicate no match");
+}
+
+static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set)
+{
+ struct kunit_suite * const * const *suites;
+
+ kfree_at_end(test, suite_set->start);
+ for (suites = suite_set->start; suites < suite_set->end; suites++)
+ kfree_at_end(test, *suites);
+}
+
+static void filter_suites_test(struct kunit *test)
+{
+ /* Suites per-file are stored as a NULL terminated array */
+ struct kunit_suite *subsuites[2][2] = {
+ {NULL, NULL},
+ {NULL, NULL},
+ };
+ /* Match the memory layout of suite_set */
+ struct kunit_suite * const * const suites[2] = {
+ subsuites[0], subsuites[1],
+ };
+
+ const struct suite_set suite_set = {
+ .start = suites,
+ .end = suites + 2,
+ };
+ struct suite_set filtered = {.start = NULL, .end = NULL};
+
+ /* Emulate two files, each having one suite */
+ subsuites[0][0] = alloc_fake_suite(test, "suite0");
+ subsuites[1][0] = alloc_fake_suite(test, "suite1");
+
+ /* Filter out suite1 */
+ filtered = kunit_filter_suites(&suite_set, "suite0");
+ kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
+ KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
+}
+
+static struct kunit_case executor_test_cases[] = {
+ KUNIT_CASE(filter_subsuite_test),
+ KUNIT_CASE(filter_subsuite_to_empty_test),
+ KUNIT_CASE(filter_suites_test),
+ {}
+};
+
+static struct kunit_suite executor_test_suite = {
+ .name = "kunit_executor_test",
+ .test_cases = executor_test_cases,
+};
+
+kunit_test_suites(&executor_test_suite);
+
+/* Test helpers */
+
+static void kfree_res_free(struct kunit_resource *res)
+{
+ kfree(res->data);
+}
+
+/* Use the resource API to register a call to kfree(to_free).
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+static void kfree_at_end(struct kunit *test, const void *to_free)
+{
+ /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
+ if (IS_ERR_OR_NULL(to_free))
+ return;
+ kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
+}
+
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name)
+{
+ struct kunit_suite *suite;
+
+ /* We normally never expect to allocate suites, hence the non-const cast. */
+ suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
+ strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+
+ return suite;
+}
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index be1164ecc476..51099b0ca29c 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -41,6 +41,35 @@ static int example_test_init(struct kunit *test)
}
/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ KUNIT_FAIL(test, "You should not see this line.");
+}
+
+/*
+ * This test should always be marked skipped.
+ */
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_info(test, "You should see this line.");
+}
+
+/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
@@ -52,6 +81,8 @@ static struct kunit_case example_test_cases[] = {
* test suite.
*/
KUNIT_CASE(example_simple_test),
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
{}
};
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
#endif
}
+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
- &kunit_log_test_suite);
+ &kunit_log_test_suite, &kunit_status_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
index fe98a00b75a9..5e94b623454f 100644
--- a/lib/kunit/string-stream.h
+++ b/lib/kunit/string-stream.h
@@ -35,9 +35,9 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
int __printf(2, 3) string_stream_add(struct string_stream *stream,
const char *fmt, ...);
-int string_stream_vadd(struct string_stream *stream,
- const char *fmt,
- va_list args);
+int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
char *string_stream_get_string(struct string_stream *stream);
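
__printf(2, 0) expands to __attribute__((format(printf, 2, 0))): argument 2
is a printf-style format string whose arguments arrive as a va_list (hence
the 0), so the compiler can now check format strings passed down through
string_stream_vadd() at compile time.
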
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..d79ecb86ea57 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)
static void kunit_print_ok_not_ok(void *test_or_suite,
bool is_test,
- bool is_ok,
+ enum kunit_status status,
size_t test_number,
- const char *description)
+ const char *description,
+ const char *directive)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit *test = is_test ? test_or_suite : NULL;
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
/*
* We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s\n",
- kunit_status_to_string(is_ok),
- test_number, description);
+ pr_info("%s %zd - %s%s%s\n",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
else
- kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
- kunit_status_to_string(is_ok),
- test_number, description);
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
}
-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;
kunit_suite_for_each_test_case(suite, test_case) {
- if (!test_case->success)
- return false;
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
}
- return true;
+ return status;
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
- suite->name);
+ suite->name,
+ suite->status_comment);
}
unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
test->log = log;
if (test->log)
test->log[0] = '\0';
- test->success = true;
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -376,7 +386,11 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);
- test_case->success = test->success;
+ /* Propagate the parameter result to the test case. */
+ if (test->status == KUNIT_FAILURE)
+ test_case->status = KUNIT_FAILURE;
+ else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
+ test_case->status = KUNIT_SUCCESS;
}
int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +402,7 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
- bool test_success = true;
+ test_case->status = KUNIT_SKIPPED;
if (test_case->generate_params) {
/* Get initial param. */
@@ -398,7 +412,6 @@ int kunit_run_tests(struct kunit_suite *suite)
do {
kunit_run_case_catch_errors(suite, test_case, &test);
- test_success &= test_case->success;
if (test_case->generate_params) {
if (param_desc[0] == '\0') {
@@ -410,7 +423,7 @@ int kunit_run_tests(struct kunit_suite *suite)
KUNIT_SUBTEST_INDENT
"# %s: %s %d - %s",
test_case->name,
- kunit_status_to_string(test.success),
+ kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
/* Get next param. */
@@ -420,9 +433,10 @@ int kunit_run_tests(struct kunit_suite *suite)
}
} while (test.param_value);
- kunit_print_ok_not_ok(&test, true, test_success,
+ kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
- test_case->name);
+ test_case->name,
+ test.status_comment);
}
kunit_print_subtest_end(suite);
@@ -434,6 +448,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites)
@@ -475,6 +490,7 @@ int kunit_add_resource(struct kunit *test,
void *data)
{
int ret = 0;
+ unsigned long flags;
res->free = free;
kref_init(&res->refcount);
@@ -487,10 +503,10 @@ int kunit_add_resource(struct kunit *test,
res->data = data;
}
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
list_add_tail(&res->node, &test->resources);
/* refcount for list is established by kref_init() */
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
return ret;
}
@@ -548,9 +564,11 @@ EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
- spin_lock(&test->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
list_del(&res->node);
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_put_resource(res);
}
EXPORT_SYMBOL_GPL(kunit_remove_resource);
@@ -573,41 +591,43 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
}
EXPORT_SYMBOL_GPL(kunit_destroy_resource);
-struct kunit_kmalloc_params {
+struct kunit_kmalloc_array_params {
+ size_t n;
size_t size;
gfp_t gfp;
};
-static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+static int kunit_kmalloc_array_init(struct kunit_resource *res, void *context)
{
- struct kunit_kmalloc_params *params = context;
+ struct kunit_kmalloc_array_params *params = context;
- res->data = kmalloc(params->size, params->gfp);
+ res->data = kmalloc_array(params->n, params->size, params->gfp);
if (!res->data)
return -ENOMEM;
return 0;
}
-static void kunit_kmalloc_free(struct kunit_resource *res)
+static void kunit_kmalloc_array_free(struct kunit_resource *res)
{
kfree(res->data);
}
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- struct kunit_kmalloc_params params = {
+ struct kunit_kmalloc_array_params params = {
.size = size,
+ .n = n,
.gfp = gfp
};
return kunit_alloc_resource(test,
- kunit_kmalloc_init,
- kunit_kmalloc_free,
+ kunit_kmalloc_array_init,
+ kunit_kmalloc_array_free,
gfp,
&params);
}
-EXPORT_SYMBOL_GPL(kunit_kmalloc);
+EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
void kunit_kfree(struct kunit *test, const void *ptr)
{
@@ -630,6 +650,7 @@ EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
struct kunit_resource *res;
+ unsigned long flags;
/*
* test->resources is a stack - each allocation must be freed in the
@@ -641,9 +662,9 @@ void kunit_cleanup(struct kunit *test)
* protect against the current node being deleted, not the next.
*/
while (true) {
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
if (list_empty(&test->resources)) {
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
break;
}
res = list_last_entry(&test->resources,
@@ -654,7 +675,7 @@ void kunit_cleanup(struct kunit *test)
* resource, and this can't happen if the test->lock
* is held.
*/
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_remove_resource(test, res);
}
current->kunit_test = NULL;
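
The switch to spin_lock_irqsave()/spin_unlock_irqrestore() on test->lock
makes the resource API safe to call from any context: with plain
spin_lock(), an interrupt arriving on the same CPU while the lock was held
and then calling kunit_add_resource() or kunit_remove_resource() would
self-deadlock.
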
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 1e1e37762799..0fb59e92ca2d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -104,7 +104,7 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* @head: the list to sort
* @cmp: the elements comparison function
*
- * The comparison funtion @cmp must return > 0 if @a should sort after
+ * The comparison function @cmp must return > 0 if @a should sort after
* @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
* sort before @b *or* their original order should be preserved. It is
* always called with the element that came first in the input in @a,
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
new file mode 100644
index 000000000000..b76b92dd0f1f
--- /dev/null
+++ b/lib/logic_iomem.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/logic_iomem.h>
+
+struct logic_iomem_region {
+ const struct resource *res;
+ const struct logic_iomem_region_ops *ops;
+ struct list_head list;
+};
+
+struct logic_iomem_area {
+ const struct logic_iomem_ops *ops;
+ void *priv;
+};
+
+#define AREA_SHIFT 24
+#define MAX_AREA_SIZE (1 << AREA_SHIFT)
+#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE)
+#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
+#define AREA_MASK (MAX_AREA_SIZE - 1)
+#ifdef CONFIG_64BIT
+#define IOREMAP_BIAS 0xDEAD000000000000UL
+#define IOREMAP_MASK 0xFFFFFFFF00000000UL
+#else
+#define IOREMAP_BIAS 0
+#define IOREMAP_MASK 0
+#endif
+
+static DEFINE_MUTEX(regions_mtx);
+static LIST_HEAD(regions_list);
+static struct logic_iomem_area mapped_areas[MAX_AREAS];
+
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops)
+{
+ struct logic_iomem_region *rreg;
+ int err;
+
+ if (WARN_ON(!resource || !ops))
+ return -EINVAL;
+
+ if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
+ return -EINVAL;
+
+ rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ if (!rreg)
+ return -ENOMEM;
+
+ err = request_resource(&iomem_resource, resource);
+ if (err) {
+ kfree(rreg);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&regions_mtx);
+ rreg->res = resource;
+ rreg->ops = ops;
+ list_add_tail(&rreg->list, &regions_list);
+ mutex_unlock(&regions_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(logic_iomem_add_region);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+{
+ WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+ (unsigned long long)offset, size);
+ return NULL;
+}
+
+static void real_iounmap(void __iomem *addr)
+{
+ WARN(1, "invalid iounmap for addr 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ void __iomem *ret = NULL;
+ struct logic_iomem_region *rreg, *found = NULL;
+ int i;
+
+ mutex_lock(&regions_mtx);
+ list_for_each_entry(rreg, &regions_list, list) {
+ if (rreg->res->start > offset)
+ continue;
+ if (rreg->res->end < offset + size - 1)
+ continue;
+ found = rreg;
+ break;
+ }
+
+ if (!found)
+ goto out;
+
+ for (i = 0; i < MAX_AREAS; i++) {
+ long offs;
+
+ if (mapped_areas[i].ops)
+ continue;
+
+ offs = rreg->ops->map(offset - found->res->start,
+ size, &mapped_areas[i].ops,
+ &mapped_areas[i].priv);
+ if (offs < 0) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ if (WARN_ON(!mapped_areas[i].ops)) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
+ break;
+ }
+out:
+ mutex_unlock(&regions_mtx);
+ if (ret)
+ return ret;
+ return real_ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+static inline struct logic_iomem_area *
+get_area(const volatile void __iomem *addr)
+{
+ unsigned long a = (unsigned long)addr;
+ unsigned int idx;
+
+ if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
+ return NULL;
+
+ idx = (a & AREA_BITS) >> AREA_SHIFT;
+
+ if (mapped_areas[idx].ops)
+ return &mapped_areas[idx];
+
+ return NULL;
+}
+
+void iounmap(void __iomem *addr)
+{
+ struct logic_iomem_area *area = get_area(addr);
+
+ if (!area) {
+ real_iounmap(addr);
+ return;
+ }
+
+ if (area->ops->unmap)
+ area->ops->unmap(area->priv);
+
+ mutex_lock(&regions_mtx);
+ area->ops = NULL;
+ area->priv = NULL;
+ mutex_unlock(&regions_mtx);
+}
+EXPORT_SYMBOL(iounmap);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#define MAKE_FALLBACK(op, sz) \
+static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid read" #op " at address %llx\n", \
+ (unsigned long long)addr); \
+ return (u ## sz)~0ULL; \
+} \
+ \
+static void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
+ (unsigned long long)val, (unsigned long long)addr); \
+} \
+
+MAKE_FALLBACK(b, 8);
+MAKE_FALLBACK(w, 16);
+MAKE_FALLBACK(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_FALLBACK(q, 64);
+#endif
+
+static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ WARN(1, "Invalid memset_io at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+
+static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
+ (unsigned long long)addr);
+
+ memset(buffer, 0xff, size);
+}
+
+static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+#define MAKE_OP(op, sz) \
+u##sz __raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) \
+ return real_raw_read ## op(addr); \
+ \
+ return (u ## sz) area->ops->read(area->priv, \
+ (unsigned long)addr & AREA_MASK,\
+ sz / 8); \
+} \
+EXPORT_SYMBOL(__raw_read ## op); \
+ \
+void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) { \
+ real_raw_write ## op(val, addr); \
+ return; \
+ } \
+ \
+ area->ops->write(area->priv, \
+ (unsigned long)addr & AREA_MASK, \
+ sz / 8, val); \
+} \
+EXPORT_SYMBOL(__raw_write ## op)
+
+MAKE_OP(b, 8);
+MAKE_OP(w, 16);
+MAKE_OP(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_OP(q, 64);
+#endif
+
+void memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memset_io(addr, value, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->set) {
+ area->ops->set(area->priv, start, value, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, value);
+}
+EXPORT_SYMBOL(memset_io);
+
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_fromio(buffer, addr, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_from) {
+ area->ops->copy_from(area->priv, buffer, start, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ buf[offs] = area->ops->read(area->priv, start + offs, 1);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ const u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_toio(addr, buffer, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_to) {
+ area->ops->copy_to(area->priv, start, buffer, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, buf[offs]);
+}
+EXPORT_SYMBOL(memcpy_toio);
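
A minimal sketch of wiring a region into this API (everything below is
illustrative; the callback signatures are inferred from the calls above -
read() returns the value, write() receives it, and map() fills in the
per-area ops/priv and returns the offset within the new area):

    static unsigned long my_mmio_read(void *priv, unsigned int offset,
                                      int size)
    {
        return 0;    /* stub device: all registers read as zero */
    }

    static void my_mmio_write(void *priv, unsigned int offset, int size,
                              unsigned long v)
    {
        /* stub device: writes are ignored */
    }

    static const struct logic_iomem_ops my_mmio_ops = {
        .read = my_mmio_read,
        .write = my_mmio_write,
    };

    static long my_mmio_map(unsigned long offset, size_t size,
                            const struct logic_iomem_ops **ops, void **priv)
    {
        *ops = &my_mmio_ops;
        *priv = NULL;
        return 0;    /* mapping starts at the beginning of the area */
    }

    static const struct logic_iomem_region_ops my_region_ops = {
        .map = my_mmio_map,
    };

    static struct resource my_res = DEFINE_RES_MEM(0x10000000, SZ_4K);

    /* in driver init: */
        err = logic_iomem_add_region(&my_res, &my_region_ops);

After registration, ioremap() of any range inside my_res routes the
__raw_read*/__raw_write* accessors through my_mmio_ops instead of touching
real MMIO.
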
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 8a7724a6ce2f..926f4823d5ea 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -481,7 +481,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
-int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 7456edb864fc..bfac26ddfc22 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
new file mode 100644
index 000000000000..01611ddff420
--- /dev/null
+++ b/lib/math/rational-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include <linux/rational.h>
+
+struct rational_test_param {
+ unsigned long num, den;
+ unsigned long max_num, max_den;
+ unsigned long exp_num, exp_den;
+
+ const char *name;
+};
+
+static const struct rational_test_param test_parameters[] = {
+ { 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
+ { 34567, 100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
+ { 1, 30, 100, 10, 0, 1, "Closest to zero" },
+ { 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
+ { 27, 32, 16, 16, 11, 13, "Use convergent" },
+ { 1155, 7735, 255, 255, 33, 221, "Exact answer" },
+ { 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
+ { 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
+};
+
+static void get_desc(const struct rational_test_param *param, char *desc)
+{
+ strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/* Creates function rational_gen_params */
+KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
+
+static void rational_test(struct kunit *test)
+{
+ const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
+ unsigned long n = 0, d = 0;
+
+ rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
+ KUNIT_EXPECT_EQ(test, n, param->exp_num);
+ KUNIT_EXPECT_EQ(test, d, param->exp_den);
+}
+
+static struct kunit_case rational_test_cases[] = {
+ KUNIT_CASE_PARAM(rational_test, rational_gen_params),
+ {}
+};
+
+static struct kunit_suite rational_test_suite = {
+ .name = "rational",
+ .test_cases = rational_test_cases,
+};
+
+kunit_test_suites(&rational_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
index 9781d521963d..c0ab51d8fbb9 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/minmax.h>
+#include <linux/limits.h>
/*
* calculate best rational approximation for a given fraction
@@ -78,13 +79,18 @@ void rational_best_approximation(
* found below as 't'.
*/
if ((n2 > max_numerator) || (d2 > max_denominator)) {
- unsigned long t = min((max_numerator - n0) / n1,
- (max_denominator - d0) / d1);
+ unsigned long t = ULONG_MAX;
- /* This tests if the semi-convergent is closer
- * than the previous convergent.
+ if (d1)
+ t = (max_denominator - d0) / d1;
+ if (n1)
+ t = min(t, (max_numerator - n0) / n1);
+
+ /* This tests if the semi-convergent is closer than the previous
+ * convergent. If d1 is zero there is no previous convergent as this
+ * is the 1st iteration, so always choose the semi-convergent.
*/
- if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
n1 = n0 + t * n1;
d1 = d0 + t * d1;
}
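
The failure the new guards address is easiest to see with the first row of
the KUnit table above: rational_best_approximation(1230, 10, 100, 20, ...).
Since 1230/10 = 123/1, the very first convergent already bursts the
max_numerator = 100 bound while d1 is still 0, so the old
(max_denominator - d0) / d1 divided by zero. With the guards, t is computed
from the numerator side alone as (100 - 0) / 1 = 100, and because there is
no previous convergent the semi-convergent is always taken, yielding the
expected 100/1.
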
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index afbd99987cf8..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -48,8 +48,8 @@
/* Define auxiliary asm macros.
*
- * 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
- * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*
* 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 7ea225b2204f..39c4c6731094 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -234,11 +234,11 @@ static int count_lzeros(MPI a)
}
/**
- * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
* @a: a multi precision integer
- * @buf: bufer to which the output will be written to. Needs to be at
- * leaset mpi_get_size(a) long.
+ * @buf: buffer to which the output will be written to. Needs to be at
+ * least mpi_get_size(a) long.
* @buf_len: size of the buf.
* @nbytes: receives the actual length of the data written on success and
* the data to-be-written on -EOVERFLOW in case buf_len was too
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 3c63710c20c6..9a75ca3f7edf 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(mpi_const);
/****************
* Note: It was a bad idea to use the number of limbs to allocate
* because on a alpha the limbs are large but we normally need
- * integers of n bits - So we should chnage this to bits (or bytes).
+ * integers of n bits - So we should change this to bits (or bytes).
*
* But mpi_alloc is used in a lot of places :-)
*/
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 1d051ef66afe..86029ad5ead4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -619,7 +619,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -633,7 +633,7 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype,
EXPORT_SYMBOL(__nla_validate);
/**
- * nla_policy_len - Determin the max. length of a policy
+ * nla_policy_len - Determine the max. length of a policy
* @policy: policy to use
* @n: number of policies
*
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 3dfaa836e7c5..e592d48b1974 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(parse_OID);
* @bufsize: The size of the buffer
*
* The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned. -EBADMSG is returned if the data could not be intepreted
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
* and -ENOBUFS if the buffer was too small.
*/
int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
diff --git a/lib/parser.c b/lib/parser.c
index f1a6d90b8c34..bcb23484100e 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -6,6 +6,7 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index e5d4b3b2af81..6e77eb6d8e72 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -82,7 +82,7 @@ pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
* @bytes_to_move: number of bytes to move the offset forward by
*
* Check that there is enough space past the current offset, and then move the
- * offset forward by this ammount.
+ * offset forward by this amount.
*
* Returns: zero on success, or -EFAULT if the image is too small to fit the
* expected length.
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 4eb29f365ece..d9d1c33aebda 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -385,7 +385,7 @@ static void test_bc(struct rs_control *rs, int len, int errs,
/*
* We check that the returned word is actually a
- * codeword. The obious way to do this would be to
+ * codeword. The obvious way to do this would be to
* compute the syndrome, but we don't want to replicate
* that code here. However, all the codes are in
* systematic form, and therefore we can encode the
diff --git a/lib/refcount.c b/lib/refcount.c
index ebac8b7d15a7..a207a8f22b3c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
* @flags: saved IRQ-flags if the lock is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interupts.
+ * with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c949c1e3b87c..e12bbfb240b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
*
* Returns zero if successful.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
*
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 47b3691058eb..b25db9be938a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -583,7 +583,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
/*
* Once the clear bit is set, the bit may be allocated out.
*
- * Orders READ/WRITE on the asssociated instance(such as request
+ * Orders READ/WRITE on the associated instance(such as request
* of blk_mq) by this bit for avoiding race with re-allocation,
* and its pair is the memory barrier implied in __sbitmap_get_word.
*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a59778946404..27efa6178153 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(sg_next);
* @sg: The scatterlist
*
* Description:
- * Allows to know how many entries are in sg, taking into acount
+ * Allows to know how many entries are in sg, taking into account
* chaining as well
*
**/
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(sg_nents);
*
* Description:
* Determines the number of entries in sg that are required to meet
- * the supplied length, taking into acount chaining as well
+ * the supplied length, taking into account chaining as well
*
* Returns:
* the number of sg entries needed, negative error on failure
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 707453f5d58e..0a68f7aa85d6 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -229,8 +229,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
WARN_ON(s->size == 0);
+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
while (len) {
- start_len = min(len, HEX_CHARS - 1);
+ start_len = min(len, MAX_MEMHEX_BYTES);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
@@ -243,12 +245,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
break;
/* j increments twice per loop */
- len -= j / 2;
hex[j++] = ' ';
seq_buf_putmem(s, hex, j);
if (seq_buf_has_overflowed(s))
return -1;
+
+ len -= start_len;
+ data += start_len;
}
return 0;
}
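
The substantive fix here: the old loop never advanced data, so any input
longer than one chunk re-dumped the same leading bytes on every iteration
(the old len -= j / 2 happened to equal start_len, but the source pointer
never moved). Advancing both len and data by start_len per chunk makes the
hexdump walk the whole buffer, and the new BUILD_BUG_ON documents the
invariant the loop relies on: a chunk of MAX_MEMHEX_BYTES, at two hex
characters per byte plus the trailing space, always fits in hex[HEX_CHARS].
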
@@ -285,7 +289,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
}
/**
- * seq_buf_to_user - copy the squence buffer to user space
+ * seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
* @cnt: The amount to copy
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000000..8662dc6cb509
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = 0x12;
+
+ /*
+ * Expecting three errors.
+ * One for the corrupted freechain and the other one for the wrong
+ * count of objects in use. The third error is reported while fixing
+ * the broken cache.
+ */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+ /*
+ * Try to repair the corrupted freepointer.
+ * Still expecting two errors: the first for the wrong count
+ * of objects in use, the second reported while fixing the
+ * broken cache.
+ */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
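
The suite registers as "slub_test" and counts failures through validate_slab_cache(), so SLUB's debug support has to be built in. A plausible .kunitconfig fragment for running it under the KUnit wrapper (the option names are assumptions based on the Kconfig.debug changes in this merge):

# Assumed fragment; SLUB_DEBUG supplies validate_slab_cache().
CONFIG_KUNIT=y
CONFIG_SLUB=y
CONFIG_SLUB_DEBUG=y
CONFIG_SLUB_KUNIT_TEST=y

From there, ./tools/testing/kunit/kunit.py run should build a UML kernel and report the cases above; the three poison tests only run when KASAN is off.
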
diff --git a/lib/sort.c b/lib/sort.c
index 3ad454411997..aa18153864d2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -51,7 +51,7 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
* which basically all CPUs have, to minimize loop overhead computations.
*
* For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
- * bottom of the loop, even though the zero flag is stil valid from the
+ * bottom of the loop, even though the zero flag is still valid from the
* subtract (since the intervening mov instructions don't alter the flags).
* Gcc 8.1.0 doesn't have that problem.
*/
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index df9179f4f441..0a2e417f83cb 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -11,7 +11,7 @@
* Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
* and free stacks repeat a lot, we save about 100x space.
* Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguos memory allocation.
+ * another in a contiguous memory allocation.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
diff --git a/lib/string.c b/lib/string.c
index 7548eb715ddb..77bd0b1d3296 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -977,7 +977,7 @@ void *memscan(void *addr, int c, size_t size)
unsigned char *p = addr;
while (size) {
- if (*p == c)
+ if (*p == (unsigned char)c)
return (void *)p;
p++;
size--;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7f2d5fbaf243..3806a52ce697 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -361,6 +361,9 @@ static bool escape_special(unsigned char c, char **dst, char *end)
case '\e':
to = 'e';
break;
+ case '"':
+ to = '"';
+ break;
default:
return false;
}
@@ -452,18 +455,20 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
*
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
+ * 1. The character is not matched to the one from @only string and thus
* must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
+ * 2. The character is matched to the printable and ASCII classes, if asked,
+ * and in case of match it passes through to the output.
+ * 3. The character is matched to the printable or ASCII class, if asked,
+ * and in case of match it passes through to the output.
+ * 4. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
* %ESCAPE_HEX will be ignored.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will not be NULL-terminated, thus the caller has to append
- * it if needs. The supported flags are::
+ * it if needed. The supported flags are::
*
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
@@ -472,6 +477,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* '\t' - horizontal tab
* '\v' - vertical tab
* %ESCAPE_SPECIAL:
+ * '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
@@ -482,11 +488,27 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
- * escape only non-printable characters (checked by isprint)
+ * escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
+ * %ESCAPE_NA:
+ * escape only non-ascii characters, checked by isascii()
+ * %ESCAPE_NAP:
+ * escape only non-printable or non-ascii characters
+ * %ESCAPE_APPEND:
+ * append characters from @only to be escaped by the given classes
+ *
+ * %ESCAPE_APPEND is used to pass additional characters to be escaped when
+ * one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
+ *
+ * One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA take
+ * higher priority than the rest of the flags (%ESCAPE_NAP is the highest).
+ * It doesn't make much sense to use either of them without %ESCAPE_OCTAL
+ * or %ESCAPE_HEX, because they cover most of the other character classes.
+ * %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
+ * the above.
*
* Return:
* The total size of the escaped output that would be generated for
@@ -500,67 +522,69 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
+ bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
+ bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
- * - the character is printable, when @flags has
- * %ESCAPE_NP bit set
* - the @only string is supplied and does not contain a
* character under question
+ * - the character is printable and ASCII, when @flags has
+ * %ESCAPE_NAP bit set
+ * - the character is printable, when @flags has
+ * %ESCAPE_NP bit set
+ * - the character is ASCII, when @flags has
+ * %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
+ *
+ * When %ESCAPE_APPEND is passed, the characters from @only
+ * have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
+ * %ESCAPE_NA cases.
*/
- if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(only, c))) {
- /* do nothing */
- } else {
- if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
- continue;
+ if (!(is_append || in_dict) && is_dict &&
+ escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
+ flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_NULL && escape_null(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isprint(c) &&
+ flags & ESCAPE_NP && escape_passthrough(c, &p, end))
+ continue;
- /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
- if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) &&
+ flags & ESCAPE_NA && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
- continue;
- }
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
- escape_passthrough(c, &p, end);
- }
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
- return p - dst;
-}
-EXPORT_SYMBOL(string_escape_mem);
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz)
-{
- char *p = dst;
- char *end = p + osz;
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
+ continue;
- while (isz--) {
- unsigned char c = *src++;
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
+ continue;
- if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
- escape_hex(c, &p, end);
- else
- escape_passthrough(c, &p, end);
+ escape_passthrough(c, &p, end);
}
return p - dst;
}
-EXPORT_SYMBOL(string_escape_mem_ascii);
+EXPORT_SYMBOL(string_escape_mem);
/*
* Return an allocated string that has been escaped of special characters
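
The rewrite above flattens the old nested if/else into a chain of early continue checks, with the @only dictionary test hoisted into in_dict and the new NAP/NP/NA bypasses ordered by priority. A self-contained userspace model of just the pass-through decisions, in the same order as the hunk (flag values are illustrative, not the kernel's):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative flag values; the kernel defines its own. */
#define ESCAPE_NP	0x01
#define ESCAPE_NA	0x02
#define ESCAPE_NAP	0x04
#define ESCAPE_APPEND	0x08

static bool ascii(unsigned char c)
{
	return c < 0x80;
}

/* True when @c would bypass all of the escape_*() helpers. */
static bool passes_through(unsigned char c, unsigned int flags, const char *only)
{
	bool is_dict = only && *only;
	bool is_append = flags & ESCAPE_APPEND;
	bool in_dict = is_dict && strchr(only, c);

	/* Characters outside @only go out as-is (unless @only only appends). */
	if (!(is_append || in_dict) && is_dict)
		return true;
	/* ESCAPE_NAP: printable AND ASCII bypasses. */
	if (!(is_append && in_dict) && ascii(c) && isprint(c) && (flags & ESCAPE_NAP))
		return true;
	/* ESCAPE_NP: printable bypasses; ESCAPE_NA: ASCII bypasses. */
	if (!(is_append && in_dict) && isprint(c) && (flags & ESCAPE_NP))
		return true;
	if (!(is_append && in_dict) && ascii(c) && (flags & ESCAPE_NA))
		return true;
	return false;
}

int main(void)
{
	/* 0xCF is non-ASCII, so ESCAPE_NA does not let it through ... */
	printf("%d\n", passes_through(0xCF, ESCAPE_NA, NULL));		/* 0 */
	/* ... while plain ASCII text is passed through untouched. */
	printf("%d\n", passes_through('b', ESCAPE_NA, NULL));		/* 1 */
	/* ESCAPE_APPEND: a dict character loses its class bypass. */
	printf("%d\n", passes_through('b', ESCAPE_NA | ESCAPE_APPEND, "b"));	/* 0 */
	return 0;
}

The third call shows the ESCAPE_APPEND effect: a character listed in @only loses its bypass and falls through to the escaping helpers.
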
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 10360d4ea273..437d8e6b7cb1 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -19,7 +19,7 @@ static __init bool test_string_check_buf(const char *name, unsigned int flags,
if (q_real == q_test && !memcmp(out_test, out_real, q_test))
return true;
- pr_warn("Test '%s' failed: flags = %u\n", name, flags);
+ pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
in, p, true);
@@ -136,31 +136,31 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPACE | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\\h\\\"\a\e\\",
.s1 = {{
- .out = "\\\\h\\\\\"\\a\\e\\\\",
+ .out = "\\\\h\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\\\\\150\\\\\\042\\a\\e\\\\",
+ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\\\\\x68\\\\\\x22\\a\\e\\\\",
+ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
.out = "\eb \\C\007\"\x90\\r]",
.flags = ESCAPE_SPACE,
},{
- .out = "\\eb \\\\C\\a\"\x90\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\r]",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\eb \\\\C\\a\"\x90\\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\\r]",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
},{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
@@ -169,10 +169,10 @@ static const struct test_string_2 escape0[] __initconst = {{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\015\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\r\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
.out = "\eb \\C\007\"\x90\r]",
@@ -201,12 +201,26 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_NP | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
-#define TEST_STRING_2_DICT_1 "b\\ \t\r"
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
static const struct test_string_2 escape1[] __initconst = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
@@ -216,16 +230,40 @@ static const struct test_string_2 escape1[] __initconst = {{
.out = "\f\\x5c\\x20\n\\x0d\\x09\v",
.flags = ESCAPE_HEX,
},{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
- .in = "\\h\\\"\a\e\\",
+ .in = "\\h\\\"\a\xCF\e\\",
.s1 = {{
- .out = "\\134h\\134\"\a\e\\134",
+ .out = "\\134h\\134\"\a\\317\e\\134",
.flags = ESCAPE_OCTAL,
},{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -233,7 +271,89 @@ static const struct test_string_2 escape1[] __initconst = {{
.flags = ESCAPE_OCTAL,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
@@ -290,7 +410,7 @@ test_string_escape_overflow(const char *in, int p, unsigned int flags, const cha
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %u, osz = 0, expected %d, got %d\n",
+ pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
name, flags, q_test, q_real);
}
@@ -315,8 +435,13 @@ static __init void test_string_escape(const char *name,
/* NULL injection */
if (flags & ESCAPE_NULL) {
in[p++] = '\0';
- out_test[q_test++] = '\\';
- out_test[q_test++] = '0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
}
/* Don't try strings that have no output */
@@ -459,17 +584,17 @@ static int __init test_string_helpers_init(void)
unsigned int i;
pr_info("Running tests...\n");
- for (i = 0; i < UNESCAPE_ANY + 1; i++)
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
get_random_int() % (UNESCAPE_ANY + 1), true);
/* Without dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
/* Test string_get_size() */
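
Several of the new vectors encode the same byte twice over: once as an octal escape for ESCAPE_OCTAL and once as a hex escape for ESCAPE_HEX. A quick standalone check of that correspondence for the two non-trivial bytes used above:

#include <stdio.h>

int main(void)
{
	/* 0x90 == \220 and 0xCF == \317, as in the expected outputs. */
	printf("\\%03o \\x%02x\n", 0x90, 0x90);	/* \220 \x90 */
	printf("\\%03o \\x%02x\n", 0xCF, 0xCF);	/* \317 \xcf */
	return 0;
}
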
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 9cd575583180..4ea73f5aed41 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -366,6 +366,13 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
{0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+ {0, "all", &exp1[8 * step], 32, 0},
+ {0, "0, 1, all, ", &exp1[8 * step], 32, 0},
+ {0, "all:1/2", &exp1[4 * step], 32, 0},
+ {0, "ALL:1/2", &exp1[4 * step], 32, 0},
+ {-EINVAL, "al", NULL, 8, 0},
+ {-EINVAL, "alll", NULL, 8, 0},
+
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index 471141ddd691..3b7bcbee84db 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -15,7 +15,7 @@
* get_count_order/long
*/
-/* use an enum because thats the most common BITMAP usage */
+/* use an enum because that's the most common BITMAP usage */
enum bitops_fun {
BITOPS_4 = 4,
BITOPS_7 = 7,
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 4dc4dcbecd12..d500320778c7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1095,7 +1095,7 @@ static struct bpf_test tests[] = {
{
"RET_A",
.u.insns = {
- /* check that unitialized X and A contain zeros */
+ /* check that uninitialized X and A contain zeros */
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b6fe89add9fe..1bccd6cd5f48 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -260,8 +260,8 @@ static ssize_t config_show(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
- "FW_ACTION_HOTPLUG" :
- "FW_ACTION_NOHOTPLUG");
+ "FW_ACTION_UEVENT" :
+ "FW_ACTION_NOUEVENT");
len += scnprintf(buf + len, PAGE_SIZE - len,
"into_buf:\t\t%s\n",
test_fw_config->into_buf ? "true" : "false");
@@ -729,7 +729,7 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
- rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+ rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
dev, GFP_KERNEL, NULL,
trigger_async_request_cb);
if (rc) {
@@ -938,8 +938,8 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
pr_info("batched loading '%s' custom fallback mechanism %u times\n",
test_fw_config->name, test_fw_config->num_requests);
- send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
- FW_ACTION_NOHOTPLUG;
+ send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
+ FW_ACTION_NOUEVENT;
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 80a78877bd93..8c55c4723692 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -25,6 +25,7 @@
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
+#include <linux/rmap.h>
#include "test_hmm_uapi.h"
@@ -46,6 +47,7 @@ struct dmirror_bounce {
unsigned long cpages;
};
+#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL
/*
@@ -218,7 +220,7 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
* the invalidation is handled as part of the migration process.
*/
if (range->event == MMU_NOTIFY_MIGRATE &&
- range->migrate_pgmap_owner == dmirror->mdevice)
+ range->owner == dmirror->mdevice)
return true;
if (mmu_notifier_range_blockable(range))
@@ -619,6 +621,54 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
}
}
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long start, unsigned long end,
+ struct page **pages, struct dmirror *dmirror)
+{
+ unsigned long pfn, mapped = 0;
+ int i;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
+ void *entry;
+
+ if (!pages[i])
+ continue;
+
+ entry = pages[i];
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mapped++;
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return mapped;
+}
+
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
struct dmirror *dmirror)
{
@@ -661,6 +711,72 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
return 0;
}
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct page *pages[64];
+ struct dmirror_bounce bounce;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long mapped;
+ int i;
+
+ if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
+ next = end;
+ else
+ next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+
+ ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ if (addr + (mapped << PAGE_SHIFT) < next) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return -EBUSY;
+ }
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
static int dmirror_migrate(struct dmirror *dmirror,
struct hmm_dmirror_cmd *cmd)
{
@@ -686,9 +802,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start ||
- !(vma->vm_flags & VM_READ)) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
ret = -EINVAL;
goto out;
}
@@ -949,6 +1064,15 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_migrate(dmirror, &cmd);
break;
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
+ break;
+
case HMM_DMIRROR_SNAPSHOT:
ret = dmirror_snapshot(dmirror, &cmd);
break;
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index 670b4ef2a5b6..f14dea5dcd06 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -33,6 +33,8 @@ struct hmm_dmirror_cmd {
#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
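
The two new ioctls pair up: HMM_DMIRROR_EXCLUSIVE asks the mirror device to take device-exclusive access to a range, and HMM_DMIRROR_CHECK_EXCLUSIVE returns -EPERM while pages are still tagged DPT_XA_TAG_ATOMIC. A hedged userspace sketch of driving them; the struct layout and command numbers follow the header above, but the device node name is an assumption and the in-tree vm selftests are the authoritative user:

#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Local copy of struct hmm_dmirror_cmd from test_hmm_uapi.h. */
struct hmm_dmirror_cmd {
	uint64_t addr;
	uint64_t ptr;
	uint64_t npages;
	uint64_t cpages;
	uint64_t faults;
};

#define HMM_DMIRROR_EXCLUSIVE		_IOWR('H', 0x04, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_CHECK_EXCLUSIVE	_IOWR('H', 0x05, struct hmm_dmirror_cmd)

int main(void)
{
	struct hmm_dmirror_cmd cmd = { 0 };
	long page = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(page, page);
	int fd = open("/dev/hmm_dmirror0", O_RDWR);	/* node name may vary */

	if (fd < 0 || !buf)
		return 1;

	memset(buf, 0x5a, page);
	cmd.addr = (uintptr_t)buf;
	cmd.ptr = (uintptr_t)buf;	/* bounce data is copied back here */
	cmd.npages = 1;

	/* Take device-exclusive access to the page ... */
	if (ioctl(fd, HMM_DMIRROR_EXCLUSIVE, &cmd) < 0)
		perror("HMM_DMIRROR_EXCLUSIVE");
	/* ... then probe it: -EPERM here means the page is still
	 * mapped for exclusive device access. */
	else if (ioctl(fd, HMM_DMIRROR_CHECK_EXCLUSIVE, &cmd) < 0)
		perror("HMM_DMIRROR_CHECK_EXCLUSIVE");

	close(fd);
	free(buf);
	return 0;
}
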
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index cacbbbdef768..8f7b0b2f6e11 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -55,7 +55,6 @@ static int kasan_test_init(struct kunit *test)
multishot = kasan_save_enable_multi_shot();
kasan_set_tagging_report_once(false);
fail_data.report_found = false;
- fail_data.report_expected = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
"kasan_data", &fail_data);
return 0;
@@ -94,34 +93,30 @@ static void kasan_test_exit(struct kunit *test)
!kasan_async_mode_enabled()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
- WRITE_ONCE(fail_data.report_expected, true); \
barrier(); \
expression; \
barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
+ if (!READ_ONCE(fail_data.report_found)) { \
+ KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
+ "expected in \"" #expression \
+ "\", but none occurred"); \
+ } \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
if (READ_ONCE(fail_data.report_found)) \
kasan_enable_tagging_sync(); \
migrate_enable(); \
} \
WRITE_ONCE(fail_data.report_found, false); \
- WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
- if (!IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " required"); \
- return; \
- } \
+ if (!IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=y"); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
- if (IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " enabled"); \
- return; \
- } \
+ if (IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=n"); \
} while (0)
static void kmalloc_oob_right(struct kunit *test)
@@ -656,7 +651,7 @@ static void kasan_global_oob(struct kunit *test)
{
/*
* Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
- * from failing here and panicing the kernel, access the array via a
+ * from failing here and panicking the kernel, access the array via a
* volatile pointer, which will prevent the compiler from being able to
* determine the array bounds.
*
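
With kunit_skip(), a test guarded by these macros is now reported as SKIPPED rather than silently passing. An illustrative test body, not taken from this diff, showing how a guard reads after the change:

/* Hypothetical test: it only runs under generic KASAN; elsewhere the
 * kunit_skip() inside the macro marks it SKIPPED and returns.
 */
static void kmalloc_oob_generic_only(struct kunit *test)
{
	char *ptr;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[128] = 'x');
	kfree(ptr);
}
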
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 38c250fbace3..ce1589391413 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -286,7 +286,7 @@ static int tally_work_test(struct kmod_test_device_info *info)
* If this ran it means *all* tasks were created fine and we
* are now just collecting results.
*
- * Only propagate errors, do not override with a subsequent sucess case.
+ * Only propagate errors, do not override with a subsequent success case.
*/
static void tally_up_work(struct kmod_test_device *test_dev)
{
@@ -543,7 +543,7 @@ static int trigger_config_run(struct kmod_test_device *test_dev)
* wrong with the setup of the test. If the test setup went fine
* then userspace must just check the result of config->test_result.
* One issue with relying on the return from a call in the kernel
- * is if the kernel returns a possitive value using this trigger
+ * is that if the kernel returns a positive value using this trigger,
* the value will not be returned to userspace; it would be lost.
*
* By not relying on capturing the return value of tests we are using
@@ -585,7 +585,7 @@ trigger_config_store(struct device *dev,
* Note: any return > 0 will be treated as success
* and the error value will not be available to userspace.
* Do not rely on trying to send to userspace a test value
- * return value as possitive return errors will be lost.
+ * return value, as positive return errors will be lost.
*/
if (WARN_ON(ret > 0))
return -EINVAL;
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 00daaf23316f..ade7a1ea0c8e 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "list_sort_test: " fmt
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
@@ -23,68 +23,52 @@ struct debug_el {
struct list_head list;
unsigned int poison2;
int value;
- unsigned serial;
+ unsigned int serial;
};
-/* Array, containing pointers to all elements in the test list */
-static struct debug_el **elts __initdata;
-
-static int __init check(struct debug_el *ela, struct debug_el *elb)
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
{
- if (ela->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", ela->serial);
- return -EINVAL;
- }
- if (elb->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", elb->serial);
- return -EINVAL;
- }
- if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- pr_err("error: phantom element\n");
- return -EINVAL;
- }
- if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
- return -EINVAL;
- }
- if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
- return -EINVAL;
- }
- return 0;
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
}
-static int __init cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct debug_el *ela, *elb;
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
- check(ela, elb);
+ check(priv, ela, elb);
return ela->value - elb->value;
}
-static int __init list_sort_test(void)
+static void list_sort_test(struct kunit *test)
{
- int i, count = 1, err = -ENOMEM;
- struct debug_el *el;
+ int i, count = 1;
+ struct debug_el *el, **elts;
struct list_head *cur;
LIST_HEAD(head);
- pr_debug("start testing list_sort()\n");
-
- elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
- if (!elts)
- return err;
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
for (i = 0; i < TEST_LIST_LEN; i++) {
- el = kmalloc(sizeof(*el), GFP_KERNEL);
- if (!el)
- goto exit;
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
el->value = prandom_u32() % (TEST_LIST_LEN / 3);
@@ -95,55 +79,44 @@ static int __init list_sort_test(void)
list_add_tail(&el->list, &head);
}
- list_sort(NULL, &head, cmp);
+ list_sort(test, &head, cmp);
- err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
- if (cur->next->prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
- cmp_result = cmp(NULL, cur, cur->next);
- if (cmp_result > 0) {
- pr_err("error: list is not sorted\n");
- goto exit;
- }
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
- if (cmp_result == 0 && el->serial >= el1->serial) {
- pr_err("error: order of equivalent elements not "
- "preserved\n");
- goto exit;
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
}
- if (check(el, el1)) {
- pr_err("error: element check failed\n");
- goto exit;
- }
+ check(test, el, el1);
count++;
}
- if (head.prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
- if (count != TEST_LIST_LEN) {
- pr_err("error: bad list length %d", count);
- goto exit;
- }
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
- err = 0;
-exit:
- for (i = 0; i < TEST_LIST_LEN; i++)
- kfree(elts[i]);
- kfree(elts);
- return err;
-}
-module_init(list_sort_test);
MODULE_LICENSE("GPL");
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 6a828dfc8ea1..abae88848972 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -600,7 +600,7 @@ static void __init numbers_prefix_overflow(void)
/*
* 0x prefix in a field of width 2 using %i conversion: first field
* converts to 0. Next field scan starts at the character after "0x",
- * which will convert if can be intepreted as decimal but will fail
+ * which will convert if it can be interpreted as decimal but will fail
* if it contains any hex digits (since no 0x prefix).
*/
test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
diff --git a/lib/test_string.c b/lib/test_string.c
index 7b31f4a505bf..9dfd6f52de92 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -179,6 +179,10 @@ static __init int strnchr_selftest(void)
return 0;
}
+static __exit void string_selftest_remove(void)
+{
+}
+
static __init int string_selftest_init(void)
{
int test, subtest;
@@ -216,4 +220,5 @@ fail:
}
module_init(string_selftest_init);
+module_exit(string_selftest_remove);
MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index ea65ec51e63b..26c83943748a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -86,6 +86,7 @@ static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
*
* This function has caveats. Please use kstrtoull instead.
*/
+noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
return simple_strntoull(cp, INT_MAX, endp, base);
@@ -992,8 +993,12 @@ char *symbol_string(char *buf, char *end, void *ptr,
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
- if (*fmt == 'B')
+ if (*fmt == 'B' && fmt[1] == 'b')
+ sprint_backtrace_build_id(sym, value);
+ else if (*fmt == 'B')
sprint_backtrace(sym, value);
+ else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
+ sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
@@ -2224,7 +2229,7 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
-static int __init no_hash_pointers_enable(char *str)
+int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
return 0;
@@ -2262,9 +2267,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
+ * - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
@@ -3416,7 +3423,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
while (*fmt) {
/* skip any white space in format */
- /* white space in format matchs any amount of
+ /* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
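
The 'b' suffix is aimed at stack traces: appending the module build ID lets post-mortem tooling match a symbolized frame to the exact module binary. A hedged example of the intended use, with a made-up output shape:

/* Illustrative only; the bracketed hash approximates what
 * sprint_symbol_build_id() emits for a module symbol.
 */
pr_err("at %pSb\n", __builtin_return_address(0));
/* -> "at widget_work+0x1c/0x80 [widget 0123456789abcdef...]" */
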
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index 72ddac6ef2ec..ef449e97d1a1 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -422,7 +422,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
/*
* Flush pending already filtered data to the output buffer. Return
- * immediatelly if we couldn't flush everything, or if the next
+ * immediately if we couldn't flush everything, or if the next
* filter in the chain had already returned XZ_STREAM_END.
*/
if (s->temp.filtered > 0) {
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index ca2603abee08..7a6781e3f47b 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -147,8 +147,8 @@ struct lzma_dec {
/*
* LZMA properties or related bit masks (number of literal
- * context bits, a mask dervied from the number of literal
- * position bits, and a mask dervied from the number
+ * context bits, a mask derived from the number of literal
+ * position bits, and a mask derived from the number
* position bits)
*/
uint32_t lc;
@@ -484,7 +484,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
}
/*
- * Decode one bit. In some versions, this function has been splitted in three
+ * Decode one bit. In some versions, this function has been split into three
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
@@ -761,7 +761,7 @@ static bool lzma_main(struct xz_dec_lzma2 *s)
}
/*
- * Reset the LZMA decoder and range decoder state. Dictionary is nore reset
+ * Reset the LZMA decoder and range decoder state. Dictionary is not reset
* here, because LZMA state may be reset without resetting the dictionary.
*/
static void lzma_reset(struct xz_dec_lzma2 *s)
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index ed1f3df27260..f19c4fbe1be7 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -15,7 +15,7 @@ union uu {
unsigned char b[2];
};
-/* Endian independed version */
+/* Endian independent version */
static inline unsigned short
get_unaligned16(const unsigned short *p)
{
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
index 2143da28d952..923218d12e28 100644
--- a/lib/zstd/huf.h
+++ b/lib/zstd/huf.h
@@ -134,7 +134,7 @@ typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
4}X_repeat */
- HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
/** HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.