Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                        4
-rw-r--r--  lib/Kconfig.debug                 18
-rw-r--r--  lib/Kconfig.kcsan                 26
-rw-r--r--  lib/Makefile                      25
-rw-r--r--  lib/cpumask.c                     16
-rw-r--r--  lib/crc-t10dif.c                  75
-rw-r--r--  lib/crc32.c                        2
-rw-r--r--  lib/crypto/chacha20poly1305.c      2
-rw-r--r--  lib/crypto/sha256.c               10
-rw-r--r--  lib/debugobjects.c                13
-rw-r--r--  lib/decompress.c                   5
-rw-r--r--  lib/decompress_unzstd.c          345
-rw-r--r--  lib/kunit/kunit-test.c           111
-rw-r--r--  lib/kunit/string-stream.c         14
-rw-r--r--  lib/kunit/test.c                 171
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c    2
-rw-r--r--  lib/math/div64.c                  41
-rw-r--r--  lib/mpi/Makefile                   1
-rw-r--r--  lib/mpi/mpi-sub-ui.c              78
-rw-r--r--  lib/radix-tree.c                   2
-rw-r--r--  lib/random32.c                     2
-rw-r--r--  lib/rhashtable.c                  35
-rw-r--r--  lib/sbitmap.c                      3
-rw-r--r--  lib/test-string_helpers.c         67
-rw-r--r--  lib/test_fpu.c                    89
-rw-r--r--  lib/test_lockup.c                  2
-rw-r--r--  lib/test_vmalloc.c               103
-rw-r--r--  lib/vsprintf.c                    17
-rw-r--r--  lib/xz/Kconfig                     2
-rw-r--r--  lib/zstd/fse_decompress.c          9
-rw-r--r--  lib/zstd/zstd_internal.h          14
31 files changed, 1102 insertions(+), 202 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index df3f3da95990..a5d6f23c4cab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -342,6 +342,10 @@ config DECOMPRESS_LZ4
select LZ4_DECOMPRESS
tristate
+config DECOMPRESS_ZSTD
+ select ZSTD_DECOMPRESS
+ tristate
+
#
# Generic allocator support is selected if needed
#
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9ad9210d70a1..3e64a8a809f9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1117,6 +1117,7 @@ config PROVE_LOCKING
select DEBUG_RWSEMS
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
+ select PREEMPT_COUNT if !ARCH_NO_PREEMPT
select TRACE_IRQFLAGS
default n
help
@@ -1325,11 +1326,17 @@ config WW_MUTEX_SELFTEST
endmenu # lock debugging
config TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_SUPPORT
bool
help
Enables hooks to interrupt enabling and disabling for
either tracing or lock debugging.
+config TRACE_IRQFLAGS_NMI
+ def_bool y
+ depends on TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_NMI_SUPPORT
+
config STACKTRACE
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
@@ -2307,6 +2314,17 @@ config TEST_HMM
If unsure, say N.
+config TEST_FPU
+ tristate "Test floating point operations in kernel space"
+ depends on X86 && !KCOV_INSTRUMENT_ALL
+ help
+ Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu
+ which will trigger a sequence of floating point operations. This is used
+ for self-testing floating point control register setting in
+ kernel_fpu_begin().
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 5ee88e5119c2..3d282d51849b 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -4,7 +4,8 @@ config HAVE_ARCH_KCSAN
bool
config HAVE_KCSAN_COMPILER
- def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1))
help
For the list of compilers that support KCSAN, please see
<file:Documentation/dev-tools/kcsan.rst>.
@@ -59,7 +60,28 @@ config KCSAN_SELFTEST
bool "Perform short selftests on boot"
default y
help
- Run KCSAN selftests on boot. On test failure, causes the kernel to panic.
+ Run KCSAN selftests on boot. On test failure, causes the kernel to
+ panic. Enabling this is recommended, as it ensures critical
+ functionality works as intended.
+
+config KCSAN_TEST
+ tristate "KCSAN test for integrated runtime behaviour"
+ depends on TRACEPOINTS && KUNIT
+ select TORTURE_TEST
+ help
+ KCSAN test focusing on behaviour of the integrated runtime. Tests
+ various race scenarios, and verifies the reports generated to
+ console. Makes use of KUnit for test organization, and the Torture
+ framework for test thread control.
+
+ Each test case may run for up to KCSAN_REPORT_ONCE_IN_MS
+ milliseconds. Test run duration may be reduced by building the
+ kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a value
+ lower than the default.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
config KCSAN_EARLY_ENABLE
bool "Early enable during boot"
diff --git a/lib/Makefile b/lib/Makefile
index b1c42c10073b..7d24dd73e34c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -99,6 +99,30 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
+#
+# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
+# off the generation of FPU/SSE* instructions for kernel proper but FPU_CFLAGS
+# get appended last to CFLAGS and thus override those previous compiler options.
+#
+FPU_CFLAGS := -mhard-float -msse -msse2
+ifdef CONFIG_CC_IS_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
+#
+# The "-msse" in the first argument is there so that the
+# -mpreferred-stack-boundary=3 build error:
+#
+# -mpreferred-stack-boundary=3 is not between 4 and 12
+#
+# can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
+endif
+
+obj-$(CONFIG_TEST_FPU) += test_fpu.o
+CFLAGS_test_fpu.o += $(FPU_CFLAGS)
+
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
obj-$(CONFIG_KUNIT) += kunit/
@@ -170,6 +194,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o
+lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index fb22fb266f93..85da6ab4fbb5 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,6 +6,7 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
+#include <linux/sched/isolation.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -205,22 +206,27 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
- int cpu;
+ int cpu, hk_flags;
+ const struct cpumask *mask;
+ hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
+ mask = housekeeping_cpumask(hk_flags);
/* Wrap: we always want a cpu. */
- i %= num_online_cpus();
+ i %= cpumask_weight(mask);
if (node == NUMA_NO_NODE) {
- for_each_cpu(cpu, cpu_online_mask)
+ for_each_cpu(cpu, mask) {
if (i-- == 0)
return cpu;
+ }
} else {
/* NUMA first. */
- for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+ for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
if (i-- == 0)
return cpu;
+ }
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_cpu(cpu, mask) {
/* Skip NUMA nodes, done above. */
if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
continue;
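For context, cpumask_local_spread() is typically called like this (a minimal
sketch, not part of this patch; nr_queues, queue_irq and dev are hypothetical):

    unsigned int q, cpu;

    for (q = 0; q < nr_queues; q++) {
        /* Pick the q-th housekeeping CPU, preferring the device's node. */
        cpu = cpumask_local_spread(q, dev_to_node(dev));
        irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
    }

With this change the returned CPUs are drawn from the housekeeping mask, so
isolated CPUs no longer receive managed-IRQ affinity hints.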
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 8cc01a603416..1ed2ed487097 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -17,64 +17,69 @@
#include <linux/notifier.h>
static struct crypto_shash __rcu *crct10dif_tfm;
-static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
- struct crypto_shash *new, *old;
if (val != CRYPTO_MSG_ALG_LOADED ||
- static_key_false(&crct10dif_fallback) ||
- strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
- return 0;
+ strcmp(alg->cra_name, CRC_T10DIF_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crct10dif_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
mutex_lock(&crc_t10dif_mutex);
old = rcu_dereference_protected(crct10dif_tfm,
lockdep_is_held(&crc_t10dif_mutex));
- if (!old) {
- mutex_unlock(&crc_t10dif_mutex);
- return 0;
- }
- new = crypto_alloc_shash("crct10dif", 0, 0);
+ new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc_t10dif_mutex);
- return 0;
+ return;
}
rcu_assign_pointer(crct10dif_tfm, new);
mutex_unlock(&crc_t10dif_mutex);
- synchronize_rcu();
- crypto_free_shash(old);
- return 0;
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crct10dif_fallback);
+ }
}
static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_rehash,
+ .notifier_call = crc_t10dif_notify,
};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
- char ctx[2];
+ __u16 crc;
} desc;
int err;
- if (static_key_false(&crct10dif_fallback))
+ if (static_branch_unlikely(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- *(__u16 *)desc.ctx = crc;
-
+ desc.crc = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
rcu_read_unlock();
BUG_ON(err);
- return *(__u16 *)desc.ctx;
+ return desc.crc;
}
EXPORT_SYMBOL(crc_t10dif_update);
@@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
crypto_register_notifier(&crc_t10dif_nb);
- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
- if (IS_ERR(crct10dif_tfm)) {
- static_key_slow_inc(&crct10dif_fallback);
- crct10dif_tfm = NULL;
- }
+ crc_t10dif_rehash(&crct10dif_rehash_work);
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_unregister_notifier(&crc_t10dif_nb);
- crypto_free_shash(crct10dif_tfm);
+ cancel_work_sync(&crct10dif_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
}
module_init(crc_t10dif_mod_init);
@@ -106,15 +109,23 @@ module_exit(crc_t10dif_mod_fini);
static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
{
- if (static_key_false(&crct10dif_fallback))
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crct10dif_fallback))
return sprintf(buffer, "fallback\n");
- return sprintf(buffer, "%s\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+ rcu_read_lock();
+ tfm = rcu_dereference(crct10dif_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
}
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
-MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
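The calling convention is unchanged by this rework; a minimal caller sketch
(buffer names hypothetical):

    __u16 crc;

    crc = crc_t10dif(buf, len);               /* one-shot over a buffer */
    crc = crc_t10dif_update(crc, more, mlen); /* continue over more data */

Until the crct10dif shash loads, the static branch routes both through
crc_t10dif_generic(); afterwards the work item swaps in the accelerated tfm.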
diff --git a/lib/crc32.c b/lib/crc32.c
index 4a20455d1f61..35a03d03f973 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -24,7 +24,7 @@
* Version 2. See the file COPYING for more details.
*/
-/* see: Documentation/crc32.txt for a description of algorithms */
+/* see: Documentation/staging/crc32.rst for a description of algorithms */
#include <linux/crc32.h>
#include <linux/crc32poly.h>
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index ad0699ce702f..431e04280332 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -21,8 +21,6 @@
#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
-bool __init chacha20poly1305_selftest(void);
-
static void chacha_load_key(u32 *k, const u8 *in)
{
k[0] = get_unaligned_le32(in);
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 2e621697c5c3..2321f6cb322f 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out)
}
EXPORT_SYMBOL(sha224_final);
+void sha256(const u8 *data, unsigned int len, u8 *out)
+{
+ struct sha256_state sctx;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, data, len);
+ sha256_final(&sctx, out);
+}
+EXPORT_SYMBOL(sha256);
+
MODULE_LICENSE("GPL");
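A minimal usage sketch of the new one-shot helper (data/len are hypothetical;
SHA256_DIGEST_SIZE is assumed to come from the sha headers):

    u8 digest[SHA256_DIGEST_SIZE];

    sha256(data, len, digest);  /* init + update + final in one call */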
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 48054dbf1b51..fe4557955d97 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1022,18 +1022,7 @@ static int debug_stats_show(struct seq_file *m, void *v)
seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
}
-
-static int debug_stats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, debug_stats_show, NULL);
-}
-
-static const struct file_operations debug_stats_fops = {
- .open = debug_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debug_stats);
static int __init debug_objects_init_debugfs(void)
{
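For reference, DEFINE_SHOW_ATTRIBUTE(debug_stats) from <linux/seq_file.h>
expands to roughly the boilerplate removed above:

    static int debug_stats_open(struct inode *inode, struct file *file)
    {
        return single_open(file, debug_stats_show, inode->i_private);
    }

    static const struct file_operations debug_stats_fops = {
        .owner   = THIS_MODULE,
        .open    = debug_stats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

The generated open passes inode->i_private rather than a literal NULL; for
this debugfs file that is NULL anyway, so behaviour is unchanged.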
diff --git a/lib/decompress.c b/lib/decompress.c
index 857ab1af1ef3..ab3fc90ffc64 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -13,6 +13,7 @@
#include <linux/decompress/inflate.h>
#include <linux/decompress/unlzo.h>
#include <linux/decompress/unlz4.h>
+#include <linux/decompress/unzstd.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -37,6 +38,9 @@
#ifndef CONFIG_DECOMPRESS_LZ4
# define unlz4 NULL
#endif
+#ifndef CONFIG_DECOMPRESS_ZSTD
+# define unzstd NULL
+#endif
struct compress_format {
unsigned char magic[2];
@@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = {
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },
+ { {0x28, 0xb5}, "zstd", unzstd },
{ {0, 0}, NULL, NULL }
};
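The two magic bytes are how a decompressor is chosen: 0x28, 0xb5 are the
leading bytes of zstd's little-endian frame magic 0xFD2FB528. A simplified
sketch of the lookup decompress_method() performs over this table:

    const struct compress_format *cf;

    for (cf = compressed_formats; cf->name; cf++)
        if (buf[0] == cf->magic[0] && buf[1] == cf->magic[1])
            return cf->decompressor;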
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
new file mode 100644
index 000000000000..0ad2c15479ed
--- /dev/null
+++ b/lib/decompress_unzstd.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for ZSTD with a 128 KB block size is calculated below.
+ * Note that the margin with ZSTD is bigger than with GZIP or XZ!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * uncompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding uncompressible data.
+ *
+ * The structure of the .zst file in case of a compressed kernel is as follows.
+ * Maximum sizes (as bytes) of the fields are in parentheses.
+ *
+ * Frame Header: (18)
+ * Blocks: (N)
+ * Checksum: (4)
+ *
+ * The frame header and checksum overhead is at most 22 bytes.
+ *
+ * ZSTD stores the data in blocks. Each block has a header whose size is
+ * 3 bytes. After the block header, there is up to 128 KB of payload.
+ * The maximum uncompressed size of the payload is 128 KB. The minimum
+ * uncompressed size of the payload is never less than the payload size
+ * (excluding the block header).
+ *
+ * The assumption that the uncompressed size of the payload is never
+ * smaller than the payload itself is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ * - 22 bytes for the .zst file format headers;
+ * - 3 bytes per every 128 KiB of uncompressed size (one block header per
+ * block); and
+ * - 128 KiB (biggest possible zstd block size) to make sure that the
+ * decompressor never overwrites anything from the block it is currently
+ * reading.
+ *
+ * We get the following formula:
+ *
+ * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072
+ * <= 22 + (uncompressed_size >> 15) + 131072
+ */
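(A worked instance of this bound, for illustration only: a hypothetical
64 MiB image gives

    safety_margin <= 22 + (67108864 >> 15) + 131072
                   = 22 + 2048 + 131072
                   = 133142 bytes, i.e. roughly 130 KiB of slack.)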
+
+/*
+ * Preboot environments #include "path/to/decompress_unzstd.c".
+ * All of the source files we depend on must be #included.
+ * zstd's only source dependeny is xxhash, which has no source
+ * dependencies.
+ *
+ * When UNZSTD_PREBOOT is defined we declare __decompress(), which is
+ * used for kernel decompression, instead of unzstd().
+ *
+ * Define __DISABLE_EXPORTS in preboot environments to prevent symbols
+ * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro.
+ */
+#ifdef STATIC
+# define UNZSTD_PREBOOT
+# include "xxhash.c"
+# include "zstd/entropy_common.c"
+# include "zstd/fse_decompress.c"
+# include "zstd/huf_decompress.c"
+# include "zstd/zstd_common.c"
+# include "zstd/decompress.c"
+#endif
+
+#include <linux/decompress/mm.h>
+#include <linux/kernel.h>
+#include <linux/zstd.h>
+
+/* 128MB is the maximum window size supported by zstd. */
+#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX)
+/*
+ * Size of the input and output buffers in multi-call mode.
+ * Pick a larger size because it isn't used during kernel decompression,
+ * since that is single pass, and we have to allocate a large buffer for
+ * zstd's window anyway. The larger size speeds up initramfs decompression.
+ */
+#define ZSTD_IOBUF_SIZE (1 << 17)
+
+static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
+{
+ const int err = ZSTD_getErrorCode(ret);
+
+ if (!ZSTD_isError(ret))
+ return 0;
+
+ switch (err) {
+ case ZSTD_error_memory_allocation:
+ error("ZSTD decompressor ran out of memory");
+ break;
+ case ZSTD_error_prefix_unknown:
+ error("Input is not in the ZSTD format (wrong magic bytes)");
+ break;
+ case ZSTD_error_dstSize_tooSmall:
+ case ZSTD_error_corruption_detected:
+ case ZSTD_error_checksum_wrong:
+ error("ZSTD-compressed data is corrupt");
+ break;
+ default:
+ error("ZSTD-compressed data is probably corrupt");
+ break;
+ }
+ return -1;
+}
+
+/*
+ * Handle the case where we have the entire input and output in one segment.
+ * We can allocate less memory (no circular buffer for the sliding window),
+ * and avoid some memcpy() calls.
+ */
+static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
+ long out_len, long *in_pos,
+ void (*error)(char *x))
+{
+ const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
+ void *wksp = large_malloc(wksp_size);
+ ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
+ int err;
+ size_t ret;
+
+ if (dctx == NULL) {
+ error("Out of memory while allocating ZSTD_DCtx");
+ err = -1;
+ goto out;
+ }
+ /*
+ * Find out how large the frame actually is, there may be junk at
+ * the end of the frame that ZSTD_decompressDCtx() can't handle.
+ */
+ ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ in_len = (long)ret;
+
+ ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+
+ if (in_pos != NULL)
+ *in_pos = in_len;
+
+ err = 0;
+out:
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+static int INIT __unzstd(unsigned char *in_buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *in_pos,
+ void (*error)(char *x))
+{
+ ZSTD_inBuffer in;
+ ZSTD_outBuffer out;
+ ZSTD_frameParams params;
+ void *in_allocated = NULL;
+ void *out_allocated = NULL;
+ void *wksp = NULL;
+ size_t wksp_size;
+ ZSTD_DStream *dstream;
+ int err;
+ size_t ret;
+
+ if (out_len == 0)
+ out_len = LONG_MAX; /* no limit */
+
+ if (fill == NULL && flush == NULL)
+ /*
+ * We can decompress faster and with less memory when we have a
+ * single chunk.
+ */
+ return decompress_single(in_buf, in_len, out_buf, out_len,
+ in_pos, error);
+
+ /*
+ * If in_buf is not provided, we must be using fill(), so allocate
+ * a large enough buffer. If it is provided, it must be at least
+ * ZSTD_IOBUF_SIZE large.
+ */
+ if (in_buf == NULL) {
+ in_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (in_allocated == NULL) {
+ error("Out of memory while allocating input buffer");
+ err = -1;
+ goto out;
+ }
+ in_buf = in_allocated;
+ in_len = 0;
+ }
+ /* Read the first chunk, since we need to decode the frame header. */
+ if (fill != NULL)
+ in_len = fill(in_buf, ZSTD_IOBUF_SIZE);
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ /* Set the first non-empty input buffer. */
+ in.src = in_buf;
+ in.pos = 0;
+ in.size = in_len;
+ /* Allocate the output buffer if we are using flush(). */
+ if (flush != NULL) {
+ out_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (out_allocated == NULL) {
+ error("Out of memory while allocating output buffer");
+ err = -1;
+ goto out;
+ }
+ out_buf = out_allocated;
+ out_len = ZSTD_IOBUF_SIZE;
+ }
+ /* Set the output buffer. */
+ out.dst = out_buf;
+ out.pos = 0;
+ out.size = out_len;
+
+ /*
+ * We need to know the window size to allocate the ZSTD_DStream.
+ * Since we are streaming, we need to allocate a buffer for the sliding
+ * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
+ * (8 MB), so it is important to use the actual value so as not to
+ * waste memory when it is smaller.
+ */
+ ret = ZSTD_getFrameParams(&params, in.src, in.size);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ if (ret != 0) {
+ error("ZSTD-compressed data has an incomplete frame header");
+ err = -1;
+ goto out;
+ }
+ if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
+ error("ZSTD-compressed data has too large a window size");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Allocate the ZSTD_DStream now that we know how much memory is
+ * required.
+ */
+ wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
+ wksp = large_malloc(wksp_size);
+ dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
+ if (dstream == NULL) {
+ error("Out of memory while allocating ZSTD_DStream");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Decompression loop:
+ * Read more data if necessary (error if no more data can be read).
+ * Call the decompression function, which returns 0 when finished.
+ * Flush any data produced if using flush().
+ */
+ if (in_pos != NULL)
+ *in_pos = 0;
+ do {
+ /*
+ * If we need to reload data, either we have fill() and can
+ * try to get more data, or we don't and the input is truncated.
+ */
+ if (in.pos == in.size) {
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+ in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1;
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ in.pos = 0;
+ in.size = in_len;
+ }
+ /* Returns zero when the frame is complete. */
+ ret = ZSTD_decompressStream(dstream, &out, &in);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ /* Flush all of the data produced if using flush(). */
+ if (flush != NULL && out.pos > 0) {
+ if (out.pos != flush(out.dst, out.pos)) {
+ error("Failed to flush()");
+ err = -1;
+ goto out;
+ }
+ out.pos = 0;
+ }
+ } while (ret != 0);
+
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+
+ err = 0;
+out:
+ if (in_allocated != NULL)
+ large_free(in_allocated);
+ if (out_allocated != NULL)
+ large_free(out_allocated);
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+#ifndef UNZSTD_PREBOOT
+STATIC int INIT unzstd(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 4f3d36a72f8f..69f902440a0e 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -118,14 +118,14 @@ static int fake_resource_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
- res->allocation = &ctx->is_resource_initialized;
+ res->data = &ctx->is_resource_initialized;
ctx->is_resource_initialized = true;
return 0;
}
static void fake_resource_free(struct kunit_resource *res)
{
- bool *is_resource_initialized = res->allocation;
+ bool *is_resource_initialized = res->data;
*is_resource_initialized = false;
}
@@ -154,11 +154,21 @@ static void kunit_resource_test_alloc_resource(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
KUNIT_EXPECT_PTR_EQ(test,
&ctx->is_resource_initialized,
- (bool *) res->allocation);
+ (bool *)res->data);
KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+
+ kunit_put_resource(res);
}
+/*
+ * Note: the tests below use kunit_alloc_and_get_resource(), and as a
+ * consequence hold a reference to the associated resource that they must
+ * release via kunit_put_resource(). In normal operation this is only
+ * needed after kunit_find_resource(); the usual allocation path,
+ * kunit_alloc_resource(), does not take a resource reference.
+ */
static void kunit_resource_test_destroy_resource(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
@@ -169,11 +179,12 @@ static void kunit_resource_test_destroy_resource(struct kunit *test)
GFP_KERNEL,
ctx);
+ kunit_put_resource(res);
+
KUNIT_ASSERT_FALSE(test,
- kunit_resource_destroy(&ctx->test,
+ kunit_destroy_resource(&ctx->test,
kunit_resource_instance_match,
- res->free,
- res->allocation));
+ res->data));
KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
@@ -191,6 +202,7 @@ static void kunit_resource_test_cleanup_resources(struct kunit *test)
fake_resource_free,
GFP_KERNEL,
ctx);
+ kunit_put_resource(resources[i]);
}
kunit_cleanup(&ctx->test);
@@ -221,14 +233,14 @@ static int fake_resource_2_init(struct kunit_resource *res, void *context)
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
- res->allocation = ctx;
+ res->data = ctx;
return 0;
}
static void fake_resource_2_free(struct kunit_resource *res)
{
- struct kunit_test_resource_context *ctx = res->allocation;
+ struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
}
@@ -236,23 +248,26 @@ static void fake_resource_2_free(struct kunit_resource *res)
static int fake_resource_1_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
+ struct kunit_resource *res2;
- kunit_alloc_and_get_resource(&ctx->test,
- fake_resource_2_init,
- fake_resource_2_free,
- GFP_KERNEL,
- ctx);
+ res2 = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_2_init,
+ fake_resource_2_free,
+ GFP_KERNEL,
+ ctx);
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
- res->allocation = ctx;
+ res->data = ctx;
+
+ kunit_put_resource(res2);
return 0;
}
static void fake_resource_1_free(struct kunit_resource *res)
{
- struct kunit_test_resource_context *ctx = res->allocation;
+ struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
}
@@ -265,13 +280,14 @@ static void fake_resource_1_free(struct kunit_resource *res)
static void kunit_resource_test_proper_free_ordering(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
/* fake_resource_1 allocates a fake_resource_2 in its init. */
- kunit_alloc_and_get_resource(&ctx->test,
- fake_resource_1_init,
- fake_resource_1_free,
- GFP_KERNEL,
- ctx);
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_1_init,
+ fake_resource_1_free,
+ GFP_KERNEL,
+ ctx);
/*
* Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
@@ -281,6 +297,8 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+ kunit_put_resource(res);
+
kunit_cleanup(&ctx->test);
/*
@@ -292,6 +310,57 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
}
+static void kunit_resource_test_static(struct kunit *test)
+{
+ struct kunit_test_resource_context ctx;
+ struct kunit_resource res;
+
+ KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx),
+ 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void kunit_resource_test_named(struct kunit *test)
+{
+ struct kunit_resource res1, res2, *found = NULL;
+ struct kunit_test_resource_context ctx;
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ 0);
+ KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ -EEXIST);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res2,
+ "resource_2", &ctx),
+ 0);
+
+ found = kunit_find_named_resource(test, "resource_1");
+
+ KUNIT_EXPECT_PTR_EQ(test, found, &res1);
+
+ if (found)
+ kunit_put_resource(&res1);
+
+ KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"),
+ 0);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
static int kunit_resource_test_init(struct kunit *test)
{
struct kunit_test_resource_context *ctx =
@@ -320,6 +389,8 @@ static struct kunit_case kunit_resource_test_cases[] = {
KUNIT_CASE(kunit_resource_test_destroy_resource),
KUNIT_CASE(kunit_resource_test_cleanup_resources),
KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+ KUNIT_CASE(kunit_resource_test_static),
+ KUNIT_CASE(kunit_resource_test_named),
{}
};
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index 350392013c14..141789ca8949 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -33,14 +33,14 @@ static int string_stream_fragment_init(struct kunit_resource *res,
if (!frag->fragment)
return -ENOMEM;
- res->allocation = frag;
+ res->data = frag;
return 0;
}
static void string_stream_fragment_free(struct kunit_resource *res)
{
- struct string_stream_fragment *frag = res->allocation;
+ struct string_stream_fragment *frag = res->data;
list_del(&frag->node);
kunit_kfree(frag->test, frag->fragment);
@@ -65,9 +65,8 @@ static struct string_stream_fragment *alloc_string_stream_fragment(
static int string_stream_fragment_destroy(struct string_stream_fragment *frag)
{
- return kunit_resource_destroy(frag->test,
+ return kunit_destroy_resource(frag->test,
kunit_resource_instance_match,
- string_stream_fragment_free,
frag);
}
@@ -179,7 +178,7 @@ static int string_stream_init(struct kunit_resource *res, void *context)
if (!stream)
return -ENOMEM;
- res->allocation = stream;
+ res->data = stream;
stream->gfp = ctx->gfp;
stream->test = ctx->test;
INIT_LIST_HEAD(&stream->fragments);
@@ -190,7 +189,7 @@ static int string_stream_init(struct kunit_resource *res, void *context)
static void string_stream_free(struct kunit_resource *res)
{
- struct string_stream *stream = res->allocation;
+ struct string_stream *stream = res->data;
string_stream_clear(stream);
}
@@ -211,8 +210,7 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
int string_stream_destroy(struct string_stream *stream)
{
- return kunit_resource_destroy(stream->test,
+ return kunit_destroy_resource(stream->test,
kunit_resource_instance_match,
- string_stream_free,
stream);
}
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index ccb2ffad8dcf..c36037200310 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -8,6 +8,7 @@
#include <kunit/test.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/sched/debug.h>
#include "debugfs.h"
@@ -406,90 +407,116 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context)
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in.
+ */
+int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
{
- struct kunit_resource *res;
- int ret;
+ int ret = 0;
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
+ res->free = free;
+ kref_init(&res->refcount);
- ret = init(res, context);
- if (ret)
- return NULL;
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
- res->free = free;
spin_lock(&test->lock);
list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
spin_unlock(&test->lock);
- return res;
+ return ret;
}
-EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
+EXPORT_SYMBOL_GPL(kunit_add_resource);
-static void kunit_resource_free(struct kunit *test, struct kunit_resource *res)
+int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
{
- res->free(res);
- kfree(res);
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+
+ return kunit_add_resource(test, init, free, res, data);
}
+EXPORT_SYMBOL_GPL(kunit_add_named_resource);
-static struct kunit_resource *kunit_resource_find(struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
- void *match_data)
+struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *data)
{
- struct kunit_resource *resource;
+ struct kunit_resource *res;
+ int ret;
- lockdep_assert_held(&test->lock);
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
- list_for_each_entry_reverse(resource, &test->resources, node) {
- if (resource->free != free)
- continue;
- if (match(test, resource->allocation, match_data))
- return resource;
+ ret = kunit_add_resource(test, init, free, res, data);
+ if (!ret) {
+ /*
+ * bump refcount for get; kunit_put_resource() should be called
+ * when done.
+ */
+ kunit_get_resource(res);
+ return res;
}
-
return NULL;
}
+EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
-static struct kunit_resource *kunit_resource_remove(
- struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
- void *match_data)
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
- struct kunit_resource *resource;
-
spin_lock(&test->lock);
- resource = kunit_resource_find(test, match, free, match_data);
- if (resource)
- list_del(&resource->node);
+ list_del(&res->node);
spin_unlock(&test->lock);
-
- return resource;
+ kunit_put_resource(res);
}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
-int kunit_resource_destroy(struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
void *match_data)
{
- struct kunit_resource *resource;
-
- resource = kunit_resource_remove(test, match, free, match_data);
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
- if (!resource)
+ if (!res)
return -ENOENT;
- kunit_resource_free(test, resource);
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
return 0;
}
-EXPORT_SYMBOL_GPL(kunit_resource_destroy);
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
struct kunit_kmalloc_params {
size_t size;
@@ -500,8 +527,8 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
{
struct kunit_kmalloc_params *params = context;
- res->allocation = kmalloc(params->size, params->gfp);
- if (!res->allocation)
+ res->data = kmalloc(params->size, params->gfp);
+ if (!res->data)
return -ENOMEM;
return 0;
@@ -509,7 +536,7 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
static void kunit_kmalloc_free(struct kunit_resource *res)
{
- kfree(res->allocation);
+ kfree(res->data);
}
void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
@@ -529,20 +556,25 @@ EXPORT_SYMBOL_GPL(kunit_kmalloc);
void kunit_kfree(struct kunit *test, const void *ptr)
{
- int rc;
+ struct kunit_resource *res;
- rc = kunit_resource_destroy(test,
- kunit_resource_instance_match,
- kunit_kmalloc_free,
- (void *)ptr);
+ res = kunit_find_resource(test, kunit_resource_instance_match,
+ (void *)ptr);
+
+ /*
+ * Removing the resource from the list of resources drops the
+ * reference count to 1; the final put will trigger the free.
+ */
+ kunit_remove_resource(test, res);
+
+ kunit_put_resource(res);
- WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
- struct kunit_resource *resource;
+ struct kunit_resource *res;
/*
* test->resources is a stack - each allocation must be freed in the
@@ -559,13 +591,16 @@ void kunit_cleanup(struct kunit *test)
spin_unlock(&test->lock);
break;
}
- resource = list_last_entry(&test->resources,
- struct kunit_resource,
- node);
- list_del(&resource->node);
+ res = list_last_entry(&test->resources,
+ struct kunit_resource,
+ node);
+ /*
+ * Need to unlock here as a resource may remove another
+ * resource, and this can't happen if the test->lock
+ * is held.
+ */
spin_unlock(&test->lock);
-
- kunit_resource_free(test, resource);
+ kunit_remove_resource(test, res);
}
}
EXPORT_SYMBOL_GPL(kunit_cleanup);
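A minimal sketch of the new caller-managed resource API, mirroring
kunit_resource_test_static in kunit-test.c (test body and context are
hypothetical):

    static void example_test(struct kunit *test)
    {
        struct kunit_resource res;  /* caller-owned, no kzalloc() */
        int ctx = 42;

        KUNIT_ASSERT_EQ(test,
                        kunit_add_resource(test, NULL, NULL, &res, &ctx), 0);
        KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
        /* init and free are NULL: res.data was simply set to &ctx, and
         * kunit_cleanup() will only drop the list reference. */
    }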
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 2717c7963acd..7892a40cf765 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -32,7 +32,7 @@
* depending on the base count. Since the base count is taken from a u8
* and a few bits, it is safe to assume that it will always be lower than
* or equal to 2*255, thus we can always prevent any overflow by accepting
- * two less 255 steps. See Documentation/lzo.txt for more information.
+ * two fewer 255 steps. See Documentation/staging/lzo.rst for more information.
*/
#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 368ca7fd0d82..3952a07130d8 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+ u64 res = 0, div, rem;
+ int shift;
+
+ /* can a * b overflow ? */
+ if (ilog2(a) + ilog2(b) > 62) {
+ /*
+ * (b * a) / c is equal to
+ *
+ * (b / c) * a +
+ * (b % c) * a / c
+ *
+ * if nothing overflows. Can the 1st multiplication
+ * overflow? Yes, but we do not care: this can only
+ * happen if the end result can't fit in u64 anyway.
+ *
+ * So the code below does
+ *
+ * res = (b / c) * a;
+ * b = b % c;
+ */
+ div = div64_u64_rem(b, c, &rem);
+ res = div * a;
+ b = rem;
+
+ shift = ilog2(a) + ilog2(b) - 62;
+ if (shift > 0) {
+ /* drop precision */
+ b >>= shift;
+ c >>= shift;
+ if (!c)
+ return res;
+ }
+ }
+
+ return res + div64_u64(a * b, c);
+}
+#endif
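A quick sanity check of the identity above, with operands chosen (arbitrarily)
so that a naive a * b would overflow 64 bits:

    /* a = 2^32, b = 2^32, c = 2^33: a * b = 2^64 overflows u64, yet
     * (b / c) * a + (b % c) * a / c evaluates to exactly 2^31. */
    u64 r = mul_u64_u64_div_u64(1ULL << 32, 1ULL << 32, 1ULL << 33);
    WARN_ON(r != 1ULL << 31);

Note that the shift > 0 branch deliberately drops precision for very large
operands, so results there may be rounded down slightly; this example stays
exact.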
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index d5874a7f5ff9..43b8fce14079 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -16,6 +16,7 @@ mpi-y = \
mpicoder.o \
mpi-bit.o \
mpi-cmp.o \
+ mpi-sub-ui.o \
mpih-cmp.o \
mpih-div.o \
mpih-mul.o \
diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c
new file mode 100644
index 000000000000..b41b082b5f3e
--- /dev/null
+++ b/lib/mpi/mpi-sub-ui.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
+ *
+ * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
+ * Free Software Foundation, Inc.
+ *
+ * This file was based on the GNU MP Library source file:
+ * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
+ *
+ * The GNU MP Library is free software; you can redistribute it and/or modify
+ * it under the terms of either:
+ *
+ * * the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your
+ * option) any later version.
+ *
+ * or
+ *
+ * * the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * or both in parallel, as here.
+ *
+ * The GNU MP Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received copies of the GNU General Public License and the
+ * GNU Lesser General Public License along with the GNU MP Library. If not,
+ * see https://www.gnu.org/licenses/.
+ */
+
+#include "mpi-internal.h"
+
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
+{
+ if (u->nlimbs == 0) {
+ if (mpi_resize(w, 1) < 0)
+ return -ENOMEM;
+ w->d[0] = vval;
+ w->nlimbs = (vval != 0);
+ w->sign = (vval != 0);
+ return 0;
+ }
+
+ /* If there is not enough space for W (and a possible carry), grow it. */
+ if (mpi_resize(w, u->nlimbs + 1))
+ return -ENOMEM;
+
+ if (u->sign) {
+ mpi_limb_t cy;
+
+ cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ w->d[u->nlimbs] = cy;
+ w->nlimbs = u->nlimbs + cy;
+ w->sign = 1;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (u->nlimbs == 1 && u->d[0] < vval) {
+ w->d[0] = vval - u->d[0];
+ w->nlimbs = 1;
+ w->sign = 1;
+ } else {
+ mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ /* Size can decrease by at most one limb. */
+ w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
+ w->sign = 0;
+ }
+ }
+
+ mpi_normalize(w);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_sub_ui);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 34e406fe561f..8e4a3a4397f2 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int uninitialized_var(offset);
+ int offset;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
diff --git a/lib/random32.c b/lib/random32.c
index 763b920a6206..3d749abb9e80 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
}
#endif
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state);
/**
* prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9f6890aedd1a..c949c1e3b87c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,7 +31,7 @@
union nested_table {
union nested_table __rcu *table;
- struct rhash_lock_head *bucket;
+ struct rhash_lock_head __rcu *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
}
static int rhashtable_rehash_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+ struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
int err;
if (!bkt)
@@ -485,7 +485,7 @@ fail:
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
struct bucket_table *tbl, unsigned int hash,
const void *key, struct rhash_head *obj)
{
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
return ERR_PTR(-ENOENT);
}
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
- struct bucket_table *tbl,
- unsigned int hash,
- struct rhash_head *obj,
- void *data)
+static struct bucket_table *rhashtable_insert_one(
+ struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+ struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+ void *data)
{
struct bucket_table *new_tbl;
struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
{
struct bucket_table *new_tbl;
struct bucket_table *tbl;
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
unsigned int hash;
void *data;
@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
{
- static struct rhash_lock_head *rhnull;
+ static struct rhash_lock_head __rcu *rhnull;
if (!rhnull)
INIT_RHT_NULLS_HEAD(rhnull);
@@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index af88d1346dd7..267aa7709416 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -292,8 +292,11 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
for (i = 0; i < sb->map_nr; i++) {
unsigned long word = READ_ONCE(sb->map[i].word);
+ unsigned long cleared = READ_ONCE(sb->map[i].cleared);
unsigned int word_bits = READ_ONCE(sb->map[i].depth);
+ word &= ~cleared;
+
while (word_bits > 0) {
unsigned int bits = min(8 - byte_bits, word_bits);
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 25b5cbfb7615..10360d4ea273 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -238,6 +238,28 @@ static const struct test_string_2 escape1[] __initconst = {{
/* terminator */
}};
+static const struct test_string strings_upper[] __initconst = {
+ {
+ .in = "abcdefgh1234567890test",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+};
+
+static const struct test_string strings_lower[] __initconst = {
+ {
+ .in = "ABCDEFGH1234567890TEST",
+ .out = "abcdefgh1234567890test",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "abcdefgh1234567890test",
+ },
+};
+
static __init const char *test_string_find_match(const struct test_string_2 *s2,
unsigned int flags)
{
@@ -390,6 +412,48 @@ static __init void test_string_get_size(void)
test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
+static void __init test_string_upper_lower(void)
+{
+ char *dst;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(strings_upper); i++) {
+ const char *s = strings_upper[i].in;
+ int len = strlen(strings_upper[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_upper(dst, s);
+ if (memcmp(dst, strings_upper[i].out, len)) {
+ pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
+ strings_upper[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(strings_lower); i++) {
+ const char *s = strings_lower[i].in;
+ int len = strlen(strings_lower[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_lower(dst, s);
+ if (memcmp(dst, strings_lower[i].out, len)) {
+ pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
+ strings_lower[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+}
+
static int __init test_string_helpers_init(void)
{
unsigned int i;
@@ -411,6 +475,9 @@ static int __init test_string_helpers_init(void)
/* Test string_get_size() */
test_string_get_size();
+ /* Test string upper(), string_lower() */
+ test_string_upper_lower();
+
return -EINVAL;
}
module_init(test_string_helpers_init);
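The helpers under test are the string_upper()/string_lower() pair from
<linux/string_helpers.h>; a minimal usage sketch (buffer size arbitrary):

    char dst[32];

    string_upper(dst, "abCdeF123");  /* dst becomes "ABCDEF123" */
    string_lower(dst, "abCdeF123");  /* dst becomes "abcdef123" */

As in the loops above, the destination must hold strlen(src) + 1 bytes.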
diff --git a/lib/test_fpu.c b/lib/test_fpu.c
new file mode 100644
index 000000000000..c33764aa3eb8
--- /dev/null
+++ b/lib/test_fpu.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for using floating point operations inside a kernel module.
+ *
+ * This tests kernel_fpu_begin() and kernel_fpu_end() functions, especially
+ * when userland has modified the floating point control registers. The kernel
+ * state might depend on the state set by the userland thread that was active
+ * before a syscall.
+ *
+ * To facilitate the test, this module registers file
+ * /sys/kernel/debug/selftest_helpers/test_fpu, which when read causes a
+ * sequence of floating point operations. If the operations fail, either the
+ * read returns error status or the kernel crashes.
+ * If the operations succeed, the read returns "1\n".
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <asm/fpu/api.h>
+
+static int test_fpu(void)
+{
+ /*
+ * This sequence of operations tests that rounding mode is
+ * to nearest and that denormal numbers are supported.
+ * Volatile variables are used to avoid compiler optimizing
+ * the calculations away.
+ */
+ volatile double a, b, c, d, e, f, g;
+
+ a = 4.0;
+ b = 1e-15;
+ c = 1e-310;
+
+ /* Sets precision flag */
+ d = a + b;
+
+ /* Result depends on rounding mode */
+ e = a + b / 2;
+
+ /* Denormal and very large values */
+ f = b / c;
+
+ /* Depends on denormal support */
+ g = a + c * f;
+
+ if (d > a && e > a && g > a)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int test_fpu_get(void *data, u64 *val)
+{
+ int status = -EINVAL;
+
+ kernel_fpu_begin();
+ status = test_fpu();
+ kernel_fpu_end();
+
+ *val = 1;
+ return status;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
+static struct dentry *selftest_dir;
+
+static int __init test_fpu_init(void)
+{
+ selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
+ if (!selftest_dir)
+ return -ENOMEM;
+
+ debugfs_create_file("test_fpu", 0444, selftest_dir, NULL,
+ &test_fpu_fops);
+
+ return 0;
+}
+
+static void __exit test_fpu_exit(void)
+{
+ debugfs_remove(selftest_dir);
+}
+
+module_init(test_fpu_init);
+module_exit(test_fpu_exit);
+
+MODULE_LICENSE("GPL");
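Once loaded, the test is driven entirely from userspace; a usage sketch:

    # cat /sys/kernel/debug/selftest_helpers/test_fpu
    1

A successful read prints 1; if the floating point sequence fails, the read
itself returns the error from test_fpu_get().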
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index bd7c7ff39f6b..ff26f36d729f 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -168,7 +168,7 @@ static int master_cpu;
static void test_lock(bool master, bool verbose)
{
- u64 uninitialized_var(wait_start);
+ u64 wait_start;
if (measure_lock_wait)
wait_start = local_clock();
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index ddc9685702b1..5cf2fe9aab9e 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -15,6 +15,8 @@
#include <linux/delay.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
#define __param(type, name, init, msg) \
static type name = init; \
@@ -35,14 +37,18 @@ __param(int, test_loop_count, 1000000,
__param(int, run_test_mask, INT_MAX,
"Set tests specified in the mask.\n\n"
- "\t\tid: 1, name: fix_size_alloc_test\n"
- "\t\tid: 2, name: full_fit_alloc_test\n"
- "\t\tid: 4, name: long_busy_list_alloc_test\n"
- "\t\tid: 8, name: random_size_alloc_test\n"
- "\t\tid: 16, name: fix_align_alloc_test\n"
- "\t\tid: 32, name: random_size_align_alloc_test\n"
- "\t\tid: 64, name: align_shift_alloc_test\n"
- "\t\tid: 128, name: pcpu_alloc_test\n"
+ "\t\tid: 1, name: fix_size_alloc_test\n"
+ "\t\tid: 2, name: full_fit_alloc_test\n"
+ "\t\tid: 4, name: long_busy_list_alloc_test\n"
+ "\t\tid: 8, name: random_size_alloc_test\n"
+ "\t\tid: 16, name: fix_align_alloc_test\n"
+ "\t\tid: 32, name: random_size_align_alloc_test\n"
+ "\t\tid: 64, name: align_shift_alloc_test\n"
+ "\t\tid: 128, name: pcpu_alloc_test\n"
+ "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
+ "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
+ "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n"
+ "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n"
/* Add a new test case description here. */
);
@@ -316,6 +322,83 @@ pcpu_alloc_test(void)
return rv;
}
+struct test_kvfree_rcu {
+ struct rcu_head rcu;
+ unsigned char array[20];
+};
+
+static int
+kvfree_rcu_1_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p);
+ }
+
+ return 0;
+}
+
+static int
+kvfree_rcu_2_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p, rcu);
+ }
+
+ return 0;
+}
+
+static int
+kvfree_rcu_1_arg_slab_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p);
+ }
+
+ return 0;
+}
+
+static int
+kvfree_rcu_2_arg_slab_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p, rcu);
+ }
+
+ return 0;
+}
+
struct test_case_desc {
const char *test_name;
int (*test_func)(void);
@@ -330,6 +413,10 @@ static struct test_case_desc test_case_array[] = {
{ "random_size_align_alloc_test", random_size_align_alloc_test },
{ "align_shift_alloc_test", align_shift_alloc_test },
{ "pcpu_alloc_test", pcpu_alloc_test },
+ { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
+ { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+ { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test },
+ { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test },
/* Add a new test case here. */
};
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 259e55895933..c155769559ab 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -381,6 +381,9 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
+static_assert(ZEROPAD == ('0' - ' '));
+static_assert(SMALL == ' ');
+
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
FORMAT_TYPE_WIDTH,
@@ -507,7 +510,7 @@ char *number(char *buf, char *end, unsigned long long num,
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = ' ' + (spec.flags & ZEROPAD);
- BUILD_BUG_ON(' ' + ZEROPAD != '0');
+
while (--field_width >= 0) {
if (buf < end)
*buf = c;
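The removed BUILD_BUG_ON() and the new static_asserts guard the same ASCII
trick: ' ' is 0x20 and '0' is 0x30, so the first assert forces ZEROPAD to be
0x10 and the padding character is computed branchlessly:

    ' ' + (spec.flags & ZEROPAD)  /* 0x20 + 0x10 = '0' when ZEROPAD is set */
                                  /* 0x20 + 0x00 = ' ' otherwise */

The second assert pins SMALL to 0x20, the ASCII case bit ORed in to produce
lowercase hex digits.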
@@ -1934,7 +1937,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
names = vmaflag_names;
break;
case 'g':
- flags = *(gfp_t *)flags_ptr;
+ flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
names = gfpflag_names;
break;
default:
@@ -1976,12 +1979,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
char *buf_start = buf;
struct property *prop;
bool has_mult, pass;
- static const struct printf_spec num_spec = {
- .flags = SMALL,
- .field_width = -1,
- .precision = -1,
- .base = 10,
- };
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
@@ -2021,7 +2018,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
str_spec.precision = precision;
break;
case 'p': /* phandle */
- buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+ buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
break;
case 'P': /* path-spec */
p = fwnode_get_name(of_fwnode_handle(dn));
@@ -2134,7 +2131,7 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
- * http://tools.ietf.org/html/rfc5952
+ * https://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by combination
* of the following flags (see string_escape_mem() for the
* details):
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 22528743d4ce..5cb50245a878 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -5,7 +5,7 @@ config XZ_DEC
help
LZMA2 compression algorithm and BCJ filters are supported using
the .xz file format as the container. For integrity checking,
- CRC32 is supported. See Documentation/xz.txt for more information.
+ CRC32 is supported. See Documentation/staging/xz.rst for more information.
if XZ_DEC
diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
index a84300e5a013..0b353530fb3f 100644
--- a/lib/zstd/fse_decompress.c
+++ b/lib/zstd/fse_decompress.c
@@ -47,6 +47,7 @@
****************************************************************/
#include "bitstream.h"
#include "fse.h"
+#include "zstd_internal.h"
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memcpy, memset */
@@ -60,14 +61,6 @@
enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
} /* use only *after* variable declarations */
-/* check and forward error code */
-#define CHECK_F(f) \
- { \
- size_t const e = f; \
- if (FSE_isError(e)) \
- return e; \
- }
-
/* **************************************************************
* Templates
****************************************************************/
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
index 1a79fab9e13a..dac753397f86 100644
--- a/lib/zstd/zstd_internal.h
+++ b/lib/zstd/zstd_internal.h
@@ -127,7 +127,14 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
* Shared functions to include for inlining
*********************************************/
ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
- memcpy(dst, src, 8);
+ /*
+ * zstd relies heavily on gcc being able to analyze and inline this
+ * memcpy() call, since it is called in a tight loop. Preboot mode
+ * is compiled in freestanding mode, which stops gcc from analyzing
+ * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
+ * regular memcpy().
+ */
+ __builtin_memcpy(dst, src, 8);
}
/*! ZSTD_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
@@ -137,13 +144,16 @@ ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
- /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
+#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
+ /*
+ * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
* Avoid the bad case where the loop only runs once by handling the
* special case separately. This doesn't trigger the bug because it
* doesn't involve pointer/integer overflow.
*/
if (length <= 8)
return ZSTD_copy8(dst, src);
+#endif
do {
ZSTD_copy8(op, ip);
op += 8;