Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     |  10
-rw-r--r--  lib/fonts/font_6x8.c  |   8
-rw-r--r--  lib/idr.c             |   1
-rw-r--r--  lib/radix-tree.c      |   1
-rw-r--r--  lib/random32.c        | 525
-rw-r--r--  lib/scatterlist.c     |   2
-rw-r--r--  lib/test_bitmap.c     |  91
-rw-r--r--  lib/test_xarray.c     |  32
-rw-r--r--  lib/xarray.c          |  25
9 files changed, 426 insertions, 269 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 66d44d35cc97..d7a7bc3b6098 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -212,9 +212,10 @@ config DEBUG_INFO
If unsure, say N.
+if DEBUG_INFO
+
config DEBUG_INFO_REDUCED
bool "Reduce debugging information"
- depends on DEBUG_INFO
help
If you say Y here gcc is instructed to generate less debugging
information for structure types. This means that tools that
@@ -227,7 +228,6 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_COMPRESSED
bool "Compressed debugging information"
- depends on DEBUG_INFO
depends on $(cc-option,-gz=zlib)
depends on $(ld-option,--compress-debug-sections=zlib)
help
@@ -243,7 +243,6 @@ config DEBUG_INFO_COMPRESSED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
depends on $(cc-option,-gsplit-dwarf)
help
Generate debug info into separate .dwo files. This significantly
@@ -259,7 +258,6 @@ config DEBUG_INFO_SPLIT
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
- depends on DEBUG_INFO
depends on $(cc-option,-gdwarf-4)
help
Generate dwarf4 debug info. This requires recent versions
@@ -269,7 +267,6 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
- depends on DEBUG_INFO
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
help
@@ -279,7 +276,6 @@ config DEBUG_INFO_BTF
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
- depends on DEBUG_INFO
help
This creates the required links to GDB helper scripts in the
build directory. If you load vmlinux into gdb, the helper
@@ -288,6 +284,8 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
+endif # DEBUG_INFO
+
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c
index e06447788418..700039a9ceae 100644
--- a/lib/fonts/font_6x8.c
+++ b/lib/fonts/font_6x8.c
@@ -3,8 +3,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_6x8[FONTDATAMAX] = {
-
+static struct font_data fontdata_6x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 000000 */
0x00, /* 000000 */
@@ -2564,13 +2564,13 @@ static const unsigned char fontdata_6x8[FONTDATAMAX] = {
0x00, /* 000000 */
0x00, /* 000000 */
0x00, /* 000000 */
-};
+} };
const struct font_desc font_6x8 = {
.idx = FONT6x8_IDX,
.name = "6x8",
.width = 6,
.height = 8,
- .data = fontdata_6x8,
+ .data = fontdata_6x8.data,
.pref = 0,
};
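
Editor's note: the change above wraps the raw glyph bytes in struct font_data so that the four header words ({ 0, 0, FONTDATAMAX, 0 }) sit directly in front of the bitmap data, and font_6x8.data is repointed at the embedded data[] member. struct font_data itself is declared in include/linux/font.h and is not part of this diff; the sketch below shows its presumed shape, purely for orientation.

/* Presumed declaration from include/linux/font.h (not in this diff). */
struct font_data {
	unsigned int extra[4];		/* header words; the initializer above puts FONTDATAMAX (the data size) in extra[2] */
	const unsigned char data[];	/* glyph bitmaps; what .data = fontdata_6x8.data now points at */
} __packed;
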
diff --git a/lib/idr.c b/lib/idr.c
index 3fa8be43696f..f4ab4f4aa3c7 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -471,6 +471,7 @@ alloc:
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
+ kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 005ced92a4a9..3a4da11b804d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
-#include <linux/local_lock.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
diff --git a/lib/random32.c b/lib/random32.c
index dfb9981ab798..4d0e05e471d7 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -38,19 +38,10 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/bitops.h>
#include <asm/unaligned.h>
#include <trace/events/random.h>
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
@@ -71,26 +62,6 @@ u32 prandom_u32_state(struct rnd_state *state)
EXPORT_SYMBOL(prandom_u32_state);
/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
- u32 res;
-
- res = prandom_u32_state(state);
- trace_prandom_u32(res);
- put_cpu_var(net_rand_state);
-
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -121,20 +92,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
}
EXPORT_SYMBOL(prandom_bytes_state);
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-
- prandom_bytes_state(state, buf, bytes);
- put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
@@ -150,96 +107,6 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
-{
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional entropy to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
- /*
- * No locking on the CPUs, but then somewhat random results are, well,
- * expected.
- */
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-
- state->s1 = __seed(state->s1 ^ entropy, 2U);
- prandom_warmup(state);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
- int i;
-
- prandom_state_selftest();
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
- prandom_seed_early(state, weak_seed, true);
- prandom_warmup(state);
- }
-
- return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer);
-
-static void __prandom_timer(struct timer_list *unused)
-{
- u32 entropy;
- unsigned long expires;
-
- get_random_bytes(&entropy, sizeof(entropy));
- prandom_seed(entropy);
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + prandom_u32_max(40);
- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
- add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
-}
-
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
@@ -259,51 +126,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
EXPORT_SYMBOL(prandom_seed_full_state);
-/*
- * Generate better values after random number generator
- * is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
- unsigned long flags;
- static bool latch = false;
- static DEFINE_SPINLOCK(lock);
-
- /* Asking for random bytes might result in bytes getting
- * moved into the nonblocking pool and thus marking it
- * as initialized. In this case we would double back into
- * this function and attempt to do a late reseed.
- * Ignore the pointless attempt to reseed again if we're
- * already waiting for bytes when the nonblocking pool
- * got initialized.
- */
-
- /* only allow initial seeding (late == false) once */
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (latch && !late)
- goto out;
-
- latch = true;
- prandom_seed_full_state(&net_rand_state);
-out:
- spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
- __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
- __prandom_reseed(false);
- __prandom_start_seed_timer();
- return 0;
-}
-late_initcall(prandom_reseed);
-
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
@@ -423,7 +245,28 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static void __init prandom_state_selftest(void)
+static u32 __extract_hwseed(void)
+{
+ unsigned int val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
@@ -463,5 +306,327 @@ static void __init prandom_state_selftest(void)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
}
+core_initcall(prandom_state_selftest);
#endif
+
+/*
+ * The prandom_u32() implementation is now completely separate from the
+ * prandom_state() functions, which are retained (for now) for compatibility.
+ *
+ * Because of (ab)use in the networking code for choosing random TCP/UDP port
+ * numbers, which open DoS possibilities if guessable, we want something
+ * stronger than a standard PRNG. But the performance requirements of
+ * the network code do not allow robust crypto for this application.
+ *
+ * So this is a homebrew Junior Spaceman implementation, based on the
+ * lowest-latency trustworthy crypto primitive available, SipHash.
+ * (The authors of SipHash have not been consulted about this abuse of
+ * their work.)
+ *
+ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
+ * one word of output. This abbreviated version uses 2 rounds per word
+ * of output.
+ */
+
+struct siprand_state {
+ unsigned long v0;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+};
+
+static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(unsigned long, net_rand_noise);
+EXPORT_PER_CPU_SYMBOL(net_rand_noise);
+
+/*
+ * This is the core CPRNG function. As "pseudorandom", this is not used
+ * for truly valuable things, just intended to be a PITA to guess.
+ * For maximum speed, we do just two SipHash rounds per word. This is
+ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
+ * so hopefully it's reasonably secure.
+ *
+ * There are two changes from the official SipHash finalization:
+ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
+ * they are there only to make the output rounds distinct from the input
+ * rounds, and this application has no input rounds.
+ * - Rather than returning v0^v1^v2^v3, return v1+v3.
+ * If you look at the SipHash round, the last operation on v3 is
+ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
+ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
+ * it still cancels out half of the bits in v2 for no benefit.)
+ * Second, since the last combining operation was xor, continue the
+ * pattern of alternating xor/add for a tiny bit of extra non-linearity.
+ */
+static inline u32 siprand_u32(struct siprand_state *s)
+{
+ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
+ unsigned long n = raw_cpu_read(net_rand_noise);
+
+ v3 ^= n;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= n;
+ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
+ return v1 + v3;
+}
+
+
+/**
+ * prandom_u32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 prandom_u32(void)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u32 res = siprand_u32(state);
+
+ trace_prandom_u32(res);
+ put_cpu_ptr(&net_rand_state);
+ return res;
+}
+EXPORT_SYMBOL(prandom_u32);
+
+/**
+ * prandom_bytes - get the requested number of pseudo-random bytes
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, size_t bytes)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u8 *ptr = buf;
+
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(siprand_u32(state), (u32 *)ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
+ }
+
+ if (bytes > 0) {
+ u32 rem = siprand_u32(state);
+
+ do {
+ *ptr++ = (u8)rem;
+ rem >>= BITS_PER_BYTE;
+ } while (--bytes > 0);
+ }
+ put_cpu_ptr(&net_rand_state);
+}
+EXPORT_SYMBOL(prandom_bytes);
+
+/**
+ * prandom_seed - add entropy to pseudo random number generator
+ * @entropy: entropy value
+ *
+ * Add some additional seed material to the prandom pool.
+ * The "entropy" is actually our IP address (the only caller is
+ * the network code), not for unpredictability, but to ensure that
+ * different machines are initialized differently.
+ */
+void prandom_seed(u32 entropy)
+{
+ int i;
+
+ add_device_randomness(&entropy, sizeof(entropy));
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
+ unsigned long v0 = state->v0, v1 = state->v1;
+ unsigned long v2 = state->v2, v3 = state->v3;
+
+ do {
+ v3 ^= entropy;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= entropy;
+ } while (unlikely(!v0 || !v1 || !v2 || !v3));
+
+ WRITE_ONCE(state->v0, v0);
+ WRITE_ONCE(state->v1, v1);
+ WRITE_ONCE(state->v2, v2);
+ WRITE_ONCE(state->v3, v3);
+ }
+}
+EXPORT_SYMBOL(prandom_seed);
+
+/*
+ * Generate some initially weak seeding values to allow
+ * the prandom_u32() engine to be started.
+ */
+static int __init prandom_init_early(void)
+{
+ int i;
+ unsigned long v0, v1, v2, v3;
+
+ if (!arch_get_random_long(&v0))
+ v0 = jiffies;
+ if (!arch_get_random_long(&v1))
+ v1 = random_get_entropy();
+ v2 = v0 ^ PRND_K0;
+ v3 = v1 ^ PRND_K1;
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+
+ v3 ^= i;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= i;
+
+ state = per_cpu_ptr(&net_rand_state, i);
+ state->v0 = v0; state->v1 = v1;
+ state->v2 = v2; state->v3 = v3;
+ }
+
+ return 0;
+}
+core_initcall(prandom_init_early);
+
+
+/* Stronger reseeding when available, and periodically thereafter. */
+static void prandom_reseed(struct timer_list *unused);
+
+static DEFINE_TIMER(seed_timer, prandom_reseed);
+
+static void prandom_reseed(struct timer_list *unused)
+{
+ unsigned long expires;
+ int i;
+
+ /*
+ * Reinitialize each CPU's PRNG with 128 bits of key.
+ * No locking on the CPUs, but then somewhat random results are,
+ * well, expected.
+ */
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
+ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
+#if BITS_PER_LONG == 32
+ int j;
+
+ /*
+ * On 32-bit machines, hash in two extra words to
+ * approximate 128-bit key length. Not that the hash
+ * has that much security, but this prevents a trivial
+ * 64-bit brute force.
+ */
+ for (j = 0; j < 2; j++) {
+ unsigned long m = get_random_long();
+
+ v3 ^= m;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= m;
+ }
+#endif
+ /*
+ * Probably impossible in practice, but there is a
+ * theoretical risk that a race between this reseeding
+ * and the target CPU writing its state back could
+ * create the all-zero SipHash fixed point.
+ *
+ * To ensure that never happens, ensure the state
+ * we write contains no zero words.
+ */
+ state = per_cpu_ptr(&net_rand_state, i);
+ WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
+ WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
+ WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
+ WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
+ }
+
+ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
+ mod_timer(&seed_timer, expires);
+}
+
+/*
+ * The random ready callback can be called from almost any interrupt.
+ * To avoid worrying about whether it's safe to delay that interrupt
+ * long enough to seed all CPUs, just schedule an immediate timer event.
+ */
+static void prandom_timer_start(struct random_ready_callback *unused)
+{
+ mod_timer(&seed_timer, jiffies);
+}
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+/* Principle: True 32-bit random numbers will all have 16 differing bits on
+ * average. For each 32-bit number, there are 601M numbers differing by 16
+ * bits, and 89% of the numbers differ by at least 12 bits. Note that more
+ * than 16 differing bits also implies a correlation with inverted bits. Thus
+ * we take 1024 random numbers and compare each of them to the other ones,
+ * counting the deviation of correlated bits to 16. Constants report 32,
+ * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the
+ * u32 total, TEST_SIZE may be as large as 4096 samples.
+ */
+#define TEST_SIZE 1024
+static int __init prandom32_state_selftest(void)
+{
+ unsigned int x, y, bits, samples;
+ u32 xor, flip;
+ u32 total;
+ u32 *data;
+
+ data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ for (samples = 0; samples < TEST_SIZE; samples++)
+ data[samples] = prandom_u32();
+
+ flip = total = 0;
+ for (x = 0; x < samples; x++) {
+ for (y = 0; y < samples; y++) {
+ if (x == y)
+ continue;
+ xor = data[x] ^ data[y];
+ flip |= xor;
+ bits = hweight32(xor);
+ total += (bits - 16) * (bits - 16);
+ }
+ }
+
+ /* We'll return the average deviation as 2*sqrt(corr/samples), which
+ * is also sqrt(4*corr/samples) which provides a better resolution.
+ */
+ bits = int_sqrt(total / (samples * (samples - 1)) * 4);
+ if (bits > 6)
+ pr_warn("prandom32: self test failed (at least %u bits"
+ " correlated, fixed_mask=%#x fixed_value=%#x\n",
+ bits, ~flip, data[0] & ~flip);
+ else
+ pr_info("prandom32: self test passed (less than %u bits"
+ " correlated)\n",
+ bits+1);
+ kfree(data);
+ return 0;
+}
+core_initcall(prandom32_state_selftest);
+#endif /* CONFIG_RANDOM32_SELFTEST */
+
+/*
+ * Start periodic full reseeding as soon as strong
+ * random numbers are available.
+ */
+static int __init prandom_init_late(void)
+{
+ static struct random_ready_callback random_ready = {
+ .func = prandom_timer_start
+ };
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (ret == -EALREADY) {
+ prandom_timer_start(&random_ready);
+ ret = 0;
+ }
+ return ret;
+}
+late_initcall(prandom_init_late);
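
Editor's note: the new code above relies on PRND_SIPROUND, PRND_K0 and PRND_K1, which are defined in include/linux/prandom.h and therefore do not appear in this diff. Going by the comments in the patch, the round is the published SipHash permutation, run twice per 32-bit output with the result taken as v1 + v3 and per-CPU noise folded into v3 before and v0 after. The standalone userspace sketch below illustrates that construction under those assumptions; it is not the kernel macro itself.

/* Standalone sketch of the construction described in the patch comments.
 * Assumes the standard 64-bit SipHash round; the real PRND_SIPROUND macro
 * lives in include/linux/prandom.h and is not shown in this diff.
 */
#include <stdint.h>
#include <stdio.h>

#define ROL64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))

/* The published SipHash round function. */
#define SIPROUND(v0, v1, v2, v3) do {					\
	v0 += v1; v1 = ROL64(v1, 13); v1 ^= v0; v0 = ROL64(v0, 32);	\
	v2 += v3; v3 = ROL64(v3, 16); v3 ^= v2;				\
	v0 += v3; v3 = ROL64(v3, 21); v3 ^= v0;				\
	v2 += v1; v1 = ROL64(v1, 17); v1 ^= v2; v2 = ROL64(v2, 32);	\
} while (0)

struct sip_state { uint64_t v0, v1, v2, v3; };

/* Two rounds per 32-bit output word, noise folded into v3/v0, result
 * v1 + v3, mirroring siprand_u32() in the patch. */
static uint32_t sip_u32(struct sip_state *s, uint64_t noise)
{
	uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

	v3 ^= noise;
	SIPROUND(v0, v1, v2, v3);
	SIPROUND(v0, v1, v2, v3);
	v0 ^= noise;
	s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
	return (uint32_t)(v1 + v3);
}

int main(void)
{
	/* Arbitrary nonzero seed for the sketch; the kernel seeds each
	 * CPU from get_random_long() (see prandom_reseed() above). */
	struct sip_state s = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
			       0x6c7967656e657261ULL, 0x7465646279746573ULL };
	int i;

	for (i = 0; i < 4; i++)
		printf("%08x\n", sip_u32(&s, 0));
	return 0;
}
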
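Editor's note: the self-test comment in the patch leans on a statistical fact worth spelling out. For independent random 32-bit words, popcount(x ^ y) is Binomial(32, 1/2), so it averages 16 differing bits with variance 8, and the reported figure sqrt(4 * total / (n * (n - 1))) lands near sqrt(32) ≈ 5.7 for a good generator, matching the "pure randoms, around 6 or lower" remark. Below is a quick userspace check of that arithmetic, using a plain xorshift32 generator purely as a stand-in sample source (an assumption for illustration, not the kernel's PRNG).

/* Userspace check of the deviation statistic described in the
 * prandom32 self-test comment. xorshift32 is only a stand-in
 * source of test samples. Build with -lm. */
#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define TEST_SIZE 1024

static uint32_t xorshift32(uint32_t *state)
{
	uint32_t x = *state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return *state = x;
}

int main(void)
{
	static uint32_t data[TEST_SIZE];
	uint32_t seed = 0x12345678;
	uint64_t total = 0;
	unsigned int x, y;

	for (x = 0; x < TEST_SIZE; x++)
		data[x] = xorshift32(&seed);

	for (x = 0; x < TEST_SIZE; x++)
		for (y = 0; y < TEST_SIZE; y++) {
			int bits;

			if (x == y)
				continue;
			bits = __builtin_popcount(data[x] ^ data[y]);
			total += (int64_t)(bits - 16) * (bits - 16);
		}

	/* For ideal samples total/(n*(n-1)) is about 8 (the binomial
	 * variance), so this prints roughly 5.7. */
	printf("average deviation: %.2f bits\n",
	       sqrt(4.0 * total / ((double)TEST_SIZE * (TEST_SIZE - 1))));
	return 0;
}
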
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9a4992dc8e8c..0a482ef988e5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -595,7 +595,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
elem_len = min_t(u64, length, PAGE_SIZE << order);
page = alloc_pages(gfp, order);
if (!page) {
- sgl_free(sgl);
+ sgl_free_order(sgl, order);
return NULL;
}
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index df903c53952b..4425a1dd4ef1 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -354,50 +354,37 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
};
-static void __init __test_bitmap_parselist(int is_user)
+static void __init test_bitmap_parselist(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
#define ptest parselist_tests[i]
- if (is_user) {
- mm_segment_t orig_fs = get_fs();
- size_t len = strlen(ptest.in);
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parselist_user((__force const char __user *)ptest.in, len,
- bmap, ptest.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- time = ktime_get();
- err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+ time = ktime_get() - time;
if (err != ptest.errno) {
- pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, ptest.in, err, ptest.errno);
+ pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
+ i, ptest.in, err, ptest.errno);
continue;
}
if (!err && ptest.expected
&& !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
- pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, ptest.in, bmap[0],
+ pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, ptest.in, bmap[0],
*ptest.expected);
continue;
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, ptest.in, time);
+ pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ i, ptest.in, time);
#undef ptest
}
@@ -443,75 +430,41 @@ static const struct test_bitmap_parselist parse_tests[] __initconst = {
#undef step
};
-static void __init __test_bitmap_parse(int is_user)
+static void __init test_bitmap_parse(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
struct test_bitmap_parselist test = parse_tests[i];
+ size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in);
- if (is_user) {
- size_t len = strlen(test.in);
- mm_segment_t orig_fs = get_fs();
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parse_user((__force const char __user *)test.in, len,
- bmap, test.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- size_t len = test.flags & NO_LEN ?
- UINT_MAX : strlen(test.in);
- time = ktime_get();
- err = bitmap_parse(test.in, len, bmap, test.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parse(test.in, len, bmap, test.nbits);
+ time = ktime_get() - time;
if (err != test.errno) {
- pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, test.in, err, test.errno);
+ pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
+ i, test.in, err, test.errno);
continue;
}
if (!err && test.expected
&& !__bitmap_equal(bmap, test.expected, test.nbits)) {
- pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, test.in, bmap[0],
+ pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, test.in, bmap[0],
*test.expected);
continue;
}
if (test.flags & PARSE_TIME)
- pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, test.in, time);
+ pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ i, test.in, time);
}
}
-static void __init test_bitmap_parselist(void)
-{
- __test_bitmap_parselist(0);
-}
-
-static void __init test_bitmap_parselist_user(void)
-{
- __test_bitmap_parselist(1);
-}
-
-static void __init test_bitmap_parse(void)
-{
- __test_bitmap_parse(0);
-}
-
-static void __init test_bitmap_parse_user(void)
-{
- __test_bitmap_parse(1);
-}
-
#define EXP1_IN_BITS (sizeof(exp1) * 8)
static void __init test_bitmap_arr32(void)
@@ -675,9 +628,7 @@ static void __init selftest(void)
test_replace();
test_bitmap_arr32();
test_bitmap_parse();
- test_bitmap_parse_user();
test_bitmap_parselist();
- test_bitmap_parselist_user();
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8262c3f05a5d..8294f43f4981 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ XA_STATE(xas, xa, 0x41);
+ void *entry;
+ int count = 0;
+
+ xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+ xa_set_mark(xa, 0x41, XA_MARK_0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+ count++;
+ XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+ }
+ XA_BUG_ON(xa, count != 1);
+ rcu_read_unlock();
+ xa_destroy(xa);
+#endif
+}
+
static noinline void check_xa_mark(struct xarray *xa)
{
unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
check_xa_mark_1(xa, index);
check_xa_mark_2(xa);
+ check_xa_mark_3(xa);
}
static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
xa_erase_index(xa, 12345678);
xa_erase_index(xa, 5);
XA_BUG_ON(xa, !xa_empty(xa));
@@ -1618,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
xa_lock(xa);
while ((node = list_first_entry_or_null(&shadow_nodes,
struct xa_node, private_list))) {
- XA_STATE(xas, node->array, 0);
XA_BUG_ON(xa, node->array != xa);
list_del_init(&node->private_list);
- xas.xa_node = xa_parent_locked(node->array, node);
- xas.xa_offset = node->offset;
- xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
- xas_set_update(&xas, test_update_node);
- xas_store(&xas, NULL);
+ xa_delete_node(node, test_update_node);
}
xa_unlock(xa);
}
diff --git a/lib/xarray.c b/lib/xarray.c
index b76eea7b314c..5fa51614802a 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -706,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
- xas->xa_index |= ((sibs + 1) << shift) - 1;
+ xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
@@ -2164,6 +2164,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
EXPORT_SYMBOL(xa_extract);
/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+ struct xa_state xas = {
+ .xa = node->array,
+ .xa_index = (unsigned long)node->offset <<
+ (node->shift + XA_CHUNK_SHIFT),
+ .xa_shift = node->shift + XA_CHUNK_SHIFT,
+ .xa_offset = node->offset,
+ .xa_node = xa_parent_locked(node->array, node),
+ .xa_update = update,
+ };
+
+ xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
+
+/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*