author | Mauro Carvalho Chehab <mchehab+huawei@kernel.org> | 2024-11-11 12:16:33 +0100 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab+huawei@kernel.org> | 2024-11-11 12:16:33 +0100 |
commit | 5516200c466f92954551406ea641376963c43a92 (patch) | |
tree | 4d9814bad59490d0cde3ce014a8ae08e9a8672db /lib | |
parent | 9b47364fd75b5494b716857af62737cdd1bca1b8 (diff) | |
parent | 2d5404caa8c7bb5c4e0435f94b28834ae5456623 (diff) | |
Merge tag 'v6.12-rc7' into __tmp-hansg-linux-tags_media_atomisp_6_13_1
Linux 6.12-rc7
* tag 'v6.12-rc7': (1909 commits)
Linux 6.12-rc7
filemap: Fix bounds checking in filemap_read()
i2c: designware: do not hold SCL low when I2C_DYNAMIC_TAR_UPDATE is not set
mailmap: add entry for Thorsten Blum
ocfs2: remove entry once instead of null-ptr-dereference in ocfs2_xa_remove()
signal: restore the override_rlimit logic
fs/proc: fix compile warning about variable 'vmcore_mmap_ops'
ucounts: fix counter leak in inc_rlimit_get_ucounts()
selftests: hugetlb_dio: check for initial conditions to skip in the start
mm: fix docs for the kernel parameter ``thp_anon=``
mm/damon/core: avoid overflow in damon_feed_loop_next_input()
mm/damon/core: handle zero schemes apply interval
mm/damon/core: handle zero {aggregation,ops_update} intervals
mm/mlock: set the correct prev on failure
objpool: fix to make percpu slot allocation more robust
mm/page_alloc: keep track of free highatomic
bcachefs: Fix UAF in __promote_alloc() error path
bcachefs: Change OPT_STR max to be 1 less than the size of choices array
bcachefs: btree_cache.freeable list fixes
bcachefs: check the invalid parameter for perf test
...
Diffstat (limited to 'lib')
40 files changed, 90 insertions, 65 deletions
diff --git a/lib/842/842.h b/lib/842/842.h
index 7b1f581a2907..f9e8a5dd790f 100644
--- a/lib/842/842.h
+++ b/lib/842/842.h
@@ -78,7 +78,7 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/sw842.h>
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7315f643817a..7312ae7c3cc5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3060,7 +3060,7 @@ config RUST_BUILD_ASSERT_ALLOW
 	bool "Allow unoptimized build-time assertions"
 	depends on RUST
 	help
-	  Controls how are `build_error!` and `build_assert!` handled during build.
+	  Controls how `build_error!` and `build_assert!` are handled during the build.
 	  If calls to them exist in the binary, it may indicate a violated invariant
 	  or that the optimizer failed to verify the invariant during compilation.
diff --git a/lib/buildid.c b/lib/buildid.c
index 290641d92ac1..c4b0f376fb34 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -5,6 +5,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
+#include <linux/secretmem.h>
 #define BUILD_ID 3
@@ -64,6 +65,10 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
 	freader_put_folio(r);
+	/* reject secretmem folios created with memfd_secret() */
+	if (secretmem_mapping(r->file->f_mapping))
+		return -EFAULT;
+
 	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
 	/* if sleeping is allowed, wait for the page, if necessary */
diff --git a/lib/codetag.c b/lib/codetag.c
index afa8a2d4f317..d1fbbb7c2ec3 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module *mod)
 	if (!mod)
 		return true;
+	/* await any module's kfree_rcu() operations to complete */
+	kvfree_rcu_barrier();
+
 	mutex_lock(&codetag_lock);
 	list_for_each_entry(cttype, &codetag_types, link) {
 		struct codetag_module *found = NULL;
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
index 827fe89922ff..eafe14d021f5 100644
--- a/lib/crypto/aes.c
+++ b/lib/crypto/aes.c
@@ -6,7 +6,7 @@
 #include <crypto/aes.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 /*
  * Emit the sbox as volatile const to prevent the compiler from doing
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
index 3b6dcfdd9628..09682136b57c 100644
--- a/lib/crypto/blake2s-generic.c
+++ b/lib/crypto/blake2s-generic.c
@@ -14,7 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 static const u8 blake2s_sigma[10][16] = {
 	{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
index b748fd3d256e..3cdda3b5ee06 100644
--- a/lib/crypto/chacha.c
+++ b/lib/crypto/chacha.c
@@ -10,7 +10,7 @@
 #include <linux/export.h>
 #include <linux/bitops.h>
 #include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/chacha.h>
 static void chacha_permute(u32 *x, int nrounds)
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
index fa43deda2660..2ea61c28be4f 100644
--- a/lib/crypto/chacha20poly1305-selftest.c
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -7,7 +7,7 @@
 #include <crypto/chacha.h>
 #include <crypto/poly1305.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/mm.h>
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index fa6a9440fc95..a839c0ac60b2 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -13,7 +13,7 @@
 #include <crypto/poly1305.h>
 #include <crypto/scatterwalk.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mm.h>
diff --git a/lib/crypto/curve25519-fiat32.c b/lib/crypto/curve25519-fiat32.c
index 2fde0ec33dbd..2e0ba634e299 100644
--- a/lib/crypto/curve25519-fiat32.c
+++ b/lib/crypto/curve25519-fiat32.c
@@ -10,7 +10,7 @@
  * with 128-bit integer types.
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/curve25519.h>
 #include <linux/string.h>
diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c
index c40e5d913234..c4204133afb7 100644
--- a/lib/crypto/curve25519-hacl64.c
+++ b/lib/crypto/curve25519-hacl64.c
@@ -10,7 +10,7 @@
  * integer types.
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/curve25519.h>
 #include <linux/string.h>
diff --git a/lib/crypto/des.c b/lib/crypto/des.c
index 9518658b97cf..d3423b34a8e9 100644
--- a/lib/crypto/des.c
+++ b/lib/crypto/des.c
@@ -17,7 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/des.h>
 #include <crypto/internal/des.h>
diff --git a/lib/crypto/memneq.c b/lib/crypto/memneq.c
index 243d8677cc51..a2afd10349c9 100644
--- a/lib/crypto/memneq.c
+++ b/lib/crypto/memneq.c
@@ -59,7 +59,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/algapi.h>
 #include <linux/module.h>
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 892a246216b9..7e6ff1ce3e9b 100644
--- a/lib/crypto/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -21,7 +21,7 @@ int mpi_mul(MPI w, MPI u, MPI v)
 	int usign, vsign, sign_product;
 	int assign_wp = 0;
 	mpi_ptr_t tmp_limb = NULL;
-	int err;
+	int err = 0;
 	if (u->nlimbs < v->nlimbs) {
 		/* Swap U and V. */
diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
index 7fb71845cc84..0a4a2d99e365 100644
--- a/lib/crypto/poly1305-donna32.c
+++ b/lib/crypto/poly1305-donna32.c
@@ -7,7 +7,7 @@
  */
 #include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/internal/poly1305.h>
 void poly1305_core_setkey(struct poly1305_core_key *key,
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
index 988702c9b3b2..530287531b2e 100644
--- a/lib/crypto/poly1305-donna64.c
+++ b/lib/crypto/poly1305-donna64.c
@@ -7,7 +7,7 @@
  */
 #include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/internal/poly1305.h>
 void poly1305_core_setkey(struct poly1305_core_key *key,
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 5d8378d23e95..6e80214ebad8 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -10,7 +10,7 @@
 #include <crypto/internal/poly1305.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 void poly1305_init_generic(struct poly1305_desc_ctx *desc,
 			   const u8 key[POLY1305_KEY_SIZE])
diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c
index 6d2922747cab..ebb60519ae93 100644
--- a/lib/crypto/sha1.c
+++ b/lib/crypto/sha1.c
@@ -12,7 +12,7 @@
 #include <linux/bitops.h>
 #include <linux/string.h>
 #include <crypto/sha1.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 /*
  * If you have 32 registers or more, the compiler can (and should)
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 3f42d203c7bc..04c1f2557e6c 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -11,7 +11,7 @@
  * Copyright (c) 2014 Red Hat Inc.
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/sha256_base.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
index 373364141408..87da2a6dd161 100644
--- a/lib/crypto/utils.c
+++ b/lib/crypto/utils.c
@@ -5,7 +5,7 @@
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/utils.h>
 #include <linux/module.h>
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index e6327391b6b6..c0dbb3cea915 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -16,7 +16,7 @@
 #include <linux/decompress/mm.h>
 #include <linux/compiler.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 /*
  * Note: Uncompressed chunk size is used in the compressor side
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 64c1358500ce..57a9e93743e1 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -28,7 +28,7 @@
 #include <linux/decompress/mm.h>
 #include <linux/compiler.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 static const unsigned char lzop_magic[] = {
 	0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 06833d404398..c3db7c3a7643 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -9,7 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/minmax.h>
 #include <linux/export.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 97003155bfac..908e75a28d90 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -461,6 +461,8 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		size_t bytes, struct iov_iter *i)
 {
 	size_t n, copied = 0;
+	bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
+			 PageHighMem(page);
 	if (!page_copy_sane(page, offset, bytes))
 		return 0;
@@ -471,7 +473,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		char *p;
 		n = bytes - copied;
-		if (PageHighMem(page)) {
+		if (uses_kmap) {
 			page += offset / PAGE_SIZE;
 			offset %= PAGE_SIZE;
 			n = min_t(size_t, n, PAGE_SIZE - offset);
@@ -482,7 +484,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		kunmap_atomic(p);
 		copied += n;
 		offset += n;
-	} while (PageHighMem(page) && copied != bytes && n > 0);
+	} while (uses_kmap && copied != bytes && n > 0);
 	return copied;
 }
@@ -1021,19 +1023,22 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
 		size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
 		size_t part = PAGE_SIZE - offset % PAGE_SIZE;
-		part = umin(part, umin(maxsize - extracted, fsize - offset));
-		count -= part;
-		iov_offset += part;
-		extracted += part;
+		if (offset < fsize) {
+			part = umin(part, umin(maxsize - extracted, fsize - offset));
+			count -= part;
+			iov_offset += part;
+			extracted += part;
+
+			*pages = folio_page(folio, offset / PAGE_SIZE);
+			get_page(*pages);
+			pages++;
+			maxpages--;
+		}
-		*pages = folio_page(folio, offset / PAGE_SIZE);
-		get_page(*pages);
-		pages++;
-		maxpages--;
 		if (maxpages == 0 || extracted >= maxsize)
 			break;
-		if (offset >= fsize) {
+		if (iov_offset >= fsize) {
 			iov_offset = 0;
 			slot++;
 			if (slot == folioq_nr_slots(folioq) && folioq->next) {
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index 90bb67994688..b0bbeeb74b9e 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -37,7 +37,7 @@
 #include "lz4defs.h"
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 static const int LZ4_minLength = (MFLIMIT + 1);
 static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 59fe69a63800..0e31e6da5ce7 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -38,7 +38,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 /*-*****************************
  * Decompression functions
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 330aa539b46e..cb358d6bde5a 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -35,7 +35,7 @@
  *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/bitops.h>
 #include <linux/string.h>	/* memset, memcpy */
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 9d31e7126606..47d6d43ea957 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/lzo.h>
 #include "lzodefs.h"
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 7892a40cf765..c94f4928e188 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -16,7 +16,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #endif
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/lzo.h>
 #include "lzodefs.h"
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 20990ecba2dd..3619301dda2e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2196,6 +2196,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
 /*
  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ * If @mas->index cannot be found within the containing
+ * node, we traverse to the last entry in the node.
  * @wr_mas: The maple write state
  *
  * Uses mas_slot_locked() and does not need to worry about dead nodes.
@@ -3532,7 +3534,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
 	return true;
 }
-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
@@ -3541,11 +3543,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots, mas->offset);
 		if (ma_is_leaf(wr_mas->type))
-			return true;
+			return;
 		mas_wr_walk_traverse(wr_mas);
-	}
-	return true;
 }
 /*
  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@@ -3765,8 +3765,8 @@ static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
-	/* Copy r_mas into b_node. */
-	if (r_mas.offset <= r_mas.end)
+	/* Copy r_mas into b_node if there is anything to copy. */
+	if (r_mas.max > r_mas.last)
 		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
 			   &b_node, b_node.b_end + 1);
 	else
@@ -4218,7 +4218,7 @@ static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
 	/* Potential spanning rebalance collapsing a node */
 	if (new_end < mt_min_slots[wr_mas->type]) {
-		if (!mte_is_root(mas->node)) {
+		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
 			mas->store_type = wr_rebalance;
 			return;
 		}
diff --git a/lib/objpool.c b/lib/objpool.c
index 234f9d0bd081..b998b720c732 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
 		 * warm caches and TLB hits. in default vmalloc is used to
 		 * reduce the pressure of kernel slab system. as we know,
 		 * mimimal size of vmalloc is one page since vmalloc would
-		 * always align the requested size to page size
+		 * always align the requested size to page size.
+		 * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
+		 * allocate percpu slot with kmalloc.
 		 */
-		if (pool->gfp & GFP_ATOMIC)
-			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
-		else
+		slot = NULL;
+
+		if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
 			slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
 				cpu_to_node(i), __builtin_return_address(0));
-		if (!slot)
-			return -ENOMEM;
+
+		if (!slot) {
+			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+			if (!slot)
+				return -ENOMEM;
+		}
 		memset(slot, 0, size);
 		pool->cpu_slots[i] = slot;
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index 54e1809a38fd..6e1581b9a616 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2018-2019, Intel Corporation. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
diff --git a/lib/random32.c b/lib/random32.c
index 32060b852668..0a5a0e3600c8 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,7 +40,7 @@
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 /**
  * prandom_u32_state - seeded pseudo-random number generator.
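The objpool.c hunk above replaces the old "GFP_ATOMIC means kmalloc, otherwise vmalloc" branch with a vmalloc-first strategy that falls back to kmalloc_node() when vmalloc is skipped or fails. A minimal standalone sketch of that allocation pattern follows; the helper name alloc_percpu_slot() and the include list are illustrative, not part of the patch.

```c
/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * objpool_init_percpu_slots() change: prefer a page-aligned vmalloc
 * area when the allocation may block, and fall back to kmalloc_node()
 * instead of failing outright. The helper name is hypothetical.
 */
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/topology.h>
#include <linux/vmalloc.h>

static void *alloc_percpu_slot(size_t size, gfp_t gfp, int cpu)
{
	void *slot = NULL;

	/* __vmalloc_node() may sleep, so skip it for atomic callers */
	if ((gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
		slot = __vmalloc_node(size, sizeof(void *), gfp,
				      cpu_to_node(cpu),
				      __builtin_return_address(0));

	/* vmalloc skipped or failed: try the slab allocator instead */
	if (!slot)
		slot = kmalloc_node(size, gfp, cpu_to_node(cpu));

	if (slot)
		memset(slot, 0, size);

	return slot;
}
```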
diff --git a/lib/siphash.c b/lib/siphash.c
index 15bc5b6f368c..9e4e88752d2e 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -10,7 +10,7 @@
  */
 #include <linux/siphash.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
 #include <linux/dcache.h>
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 6e3a1e5a7142..33564f965958 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -141,7 +141,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 {
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
 				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
-	u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
+	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
 	kasan_disable_current();
@@ -164,10 +164,16 @@ struct test_kfree_rcu_struct {
 static void test_kfree_rcu(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
-						      sizeof(struct test_kfree_rcu_struct),
-						      SLAB_NO_MERGE);
-	struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
+	struct kmem_cache *s;
+	struct test_kfree_rcu_struct *p;
+
+	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+		kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+	s = test_kmem_cache_create("TestSlub_kfree_rcu",
+				   sizeof(struct test_kfree_rcu_struct),
+				   SLAB_NO_MERGE);
+	p = kmem_cache_alloc(s, GFP_KERNEL);
 	kfree_rcu(p, rcu);
 	kmem_cache_destroy(s);
@@ -177,13 +183,13 @@ static void test_kfree_rcu(struct kunit *test)
 static void test_leak_destroy(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
 						      64, SLAB_NO_MERGE);
 	kmem_cache_alloc(s, GFP_KERNEL);
 	kmem_cache_destroy(s);
-	KUNIT_EXPECT_EQ(test, 1, slab_errors);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
 }
 static int test_init(struct kunit *test)
diff --git a/lib/string.c b/lib/string.c
index 966da44bfc86..76327b51e36f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,7 +27,7 @@
 #include <asm/page.h>
 #include <asm/rwonce.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <asm/word-at-a-time.h>
 #ifndef __HAVE_ARCH_STRNCASECMP
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 09f022ba1c05..c5e2ec9303c5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -51,7 +51,7 @@
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/byteorder.h>	/* cpu_to_le16 */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/string_helpers.h>
 #include "kstrtox.h"
diff --git a/lib/xxhash.c b/lib/xxhash.c
index d5bb9ff10607..b5bd567aa6b3 100644
--- a/lib/xxhash.c
+++ b/lib/xxhash.c
@@ -38,7 +38,7 @@
  * - xxHash source repository: https://github.com/Cyan4973/xxHash
  */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 5f1294a1408c..8409784b1639 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -12,7 +12,7 @@
 #ifdef __KERNEL__
 #	include <linux/xz.h>
 #	include <linux/kernel.h>
-#	include <asm/unaligned.h>
+#	include <linux/unaligned.h>
 	/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
 #	ifndef XZ_PREBOOT
 #		include <linux/slab.h>
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
index 1d9cc03924ca..c22a2e69bf46 100644
--- a/lib/zstd/common/mem.h
+++ b/lib/zstd/common/mem.h
@@ -15,7 +15,7 @@
 /*-****************************************
 *  Dependencies
 ******************************************/
-#include <asm/unaligned.h>  /* get_unaligned, put_unaligned* */
+#include <linux/unaligned.h>  /* get_unaligned, put_unaligned* */
 #include <linux/compiler.h>  /* inline */
 #include <linux/swab.h>  /* swab32, swab64 */
 #include <linux/types.h>  /* size_t, ptrdiff_t */
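Most of the hunks above are a mechanical switch from <asm/unaligned.h> to <linux/unaligned.h>; only the include path changes, not the accessor API. A small sketch of what a caller looks like after the move; the two functions below are hypothetical examples, not code from the patch.

```c
/*
 * Illustrative sketch only -- not from the patch. After the header move,
 * callers include <linux/unaligned.h> instead of <asm/unaligned.h>; the
 * helpers themselves (get_unaligned_le32(), put_unaligned_be16(), ...)
 * keep their existing signatures.
 */
#include <linux/types.h>
#include <linux/unaligned.h>	/* was <asm/unaligned.h> */

static inline u32 read_le32_field(const u8 *buf)
{
	/* load a little-endian 32-bit value from a possibly unaligned buffer */
	return get_unaligned_le32(buf);
}

static inline void write_be16_field(u8 *buf, u16 val)
{
	/* store a big-endian 16-bit value to a possibly unaligned buffer */
	put_unaligned_be16(val, buf);
}
```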