author     Vlastimil Babka <vbabka@suse.cz>  2022-11-21 10:37:36 +0100
committer  Vlastimil Babka <vbabka@suse.cz>  2022-11-21 10:37:36 +0100
commit     b5e72d27e39a092c956b34b286f6564ceee4134c (patch)
tree       744c444b3df7124270b2089256c5cd117272c229 /include/linux/slab.h
parent     90e9b23a60d5b4c8317f58e01ed05d3bdf063440 (diff)
parent     6fa57d78aa7f212fd7c0de70f5756e18513dcdcf (diff)
Merge branch 'slab/for-6.2/alloc_size' into slab/for-next
Two patches from Kees Cook [1]:

These patches work around a deficiency in GCC (>=11) and Clang (<16) where
the __alloc_size attribute does not apply to inlines.
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503

This manifests as reduced overflow detection coverage for many allocation
sites under CONFIG_FORTIFY_SOURCE=y, where the allocation size was not
actually being propagated to __builtin_dynamic_object_size().

[1] https://lore.kernel.org/all/20221118034713.gonna.754-kees@kernel.org/
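The deficiency is easy to reproduce outside the kernel. The following is a
minimal userspace sketch, not kernel code: raw_alloc and wrapped_alloc are
invented names, and __builtin_dynamic_object_size needs GCC >= 12 or a
recent Clang. The inline wrapper carries the alloc_size hint the same way
kmalloc() does, and on the affected compilers the hint is lost once the
wrapper is inlined.

#include <stdio.h>
#include <stdlib.h>

/* Underlying allocator: deliberately carries no alloc_size attribute and
 * cannot be inlined, so the only size hint comes from the wrapper below. */
__attribute__((noinline)) static void *raw_alloc(size_t size)
{
        return malloc(size);
}

/* Inline wrapper carrying the size hint, as the kernel's kmalloc() does. */
static inline __attribute__((alloc_size(1))) void *wrapped_alloc(size_t size)
{
        return raw_alloc(size);
}

int main(void)
{
        char *p = wrapped_alloc(32);

        /* Expected: 32. On GCC >= 11 and Clang < 16 the inlined wrapper's
         * alloc_size hint is ignored and this prints (size_t)-1, i.e.
         * "unknown" -- the reduced FORTIFY_SOURCE coverage described above. */
        printf("%zu\n", __builtin_dynamic_object_size(p, 0));
        free(p);
        return 0;
}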
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 067f0e80be9e..cd3efac72f3d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -547,42 +547,42 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * Try really hard to succeed the allocation but fail
  * eventually.
  */
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
-        if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
+        if (__builtin_constant_p(size) && size) {
                 unsigned int index;
-#endif
+
                 if (size > KMALLOC_MAX_CACHE_SIZE)
                         return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
-                index = kmalloc_index(size);
-
-                if (!index)
-                        return ZERO_SIZE_PTR;
 
+                index = kmalloc_index(size);
                 return kmalloc_trace(
                                 kmalloc_caches[kmalloc_type(flags)][index],
                                 flags, size);
-#endif
         }
         return __kmalloc(size, flags);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+{
+        if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+                return kmalloc_large(size, flags);
+
+        return __kmalloc(size, flags);
+}
+#endif
 
 #ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        if (__builtin_constant_p(size)) {
+        if (__builtin_constant_p(size) && size) {
                 unsigned int index;
 
                 if (size > KMALLOC_MAX_CACHE_SIZE)
                         return kmalloc_large_node(size, flags, node);
 
                 index = kmalloc_index(size);
-
-                if (!index)
-                        return ZERO_SIZE_PTR;
-
                 return kmalloc_node_trace(
                                 kmalloc_caches[kmalloc_type(flags)][index],
                                 flags, node, size);
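
The effect of the new "__builtin_constant_p(size) && size" test is easier to
see in isolation. Below is a minimal userspace model of the same dispatch;
toy_alloc, toy_fixed_bucket, toy_fallback, and TOY_MAX_BUCKET are
hypothetical stand-ins, not kernel APIs. A compile-time-constant, non-zero
size lets the compiler fold the branch and pick the bucket path directly,
while a constant zero, which the old code special-cased inline via
"!index" and ZERO_SIZE_PTR, now simply falls through to the out-of-line
slow path.

#include <stdlib.h>

#define TOY_MAX_BUCKET 4096
#define TOY_ZERO_SIZE_PTR ((void *)16)    /* stand-in for ZERO_SIZE_PTR */

/* Out-of-line slow path: handles runtime sizes and, now, the zero-size
 * case that the inline fast path no longer special-cases. */
static void *toy_fallback(size_t size)
{
        if (!size)
                return TOY_ZERO_SIZE_PTR;
        return malloc(size);
}

/* Stand-in for the fixed-size cache path (kmalloc_trace() in the diff). */
static void *toy_fixed_bucket(size_t size)
{
        return malloc(size);
}

/* Mirrors the patched kmalloc() dispatch: constant non-zero sizes are
 * resolved at compile time, everything else takes the slow path. */
static inline __attribute__((always_inline, alloc_size(1)))
void *toy_alloc(size_t size)
{
        if (__builtin_constant_p(size) && size) {
                if (size > TOY_MAX_BUCKET)
                        return malloc(size);            /* "large" path */
                return toy_fixed_bucket(size);
        }
        return toy_fallback(size);
}

int main(void)
{
        void *a = toy_alloc(128);  /* constant size: folded to the bucket path */
        void *b = toy_alloc(0);    /* constant zero: slow path, poison pointer */

        free(a);
        (void)b;                   /* not a real allocation; nothing to free */
        return 0;
}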