Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c       5
-rw-r--r--  mm/kasan/kasan.h  12
-rw-r--r--  mm/page_alloc.c   28
-rw-r--r--  mm/slab.h         15
-rw-r--r--  mm/slub.c         14
5 files changed, 47 insertions, 27 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 924553aa8f78..dfc940d5221d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
continue;
}
- refs = min3(pages_per_huge_page(h) - pfn_offset,
- (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+ /* vaddr may not be aligned to PAGE_SIZE */
+ refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+ (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
record_subpages_vmas(mem_map_offset(page, pfn_offset),
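The hugetlb.c hunk changes how refs is bounded: the old expression (vma->vm_end - vaddr) >> PAGE_SHIFT rounds down when vaddr is not page aligned and can report one page too few. A minimal userspace sketch of the arithmetic, using made-up addresses and a simplified power-of-two ALIGN_DOWN, shows the difference:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* Hypothetical layout: one page left in the VMA, vaddr sits mid-page. */
	unsigned long vm_end = 0x20000;
	unsigned long vaddr  = 0x1f800;	/* not PAGE_SIZE aligned */

	/* Old bound rounds down and reports 0 pages remaining. */
	unsigned long before = (vm_end - vaddr) >> PAGE_SHIFT;

	/* Fixed bound aligns vaddr first and reports the full page. */
	unsigned long after = (vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT;

	printf("before=%lu after=%lu\n", before, after);	/* before=0 after=1 */
	return 0;
}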
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 98e3059bfea4..d739cdd1621a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
+#include "../slab.h"
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
+ /*
+ * Explicitly initialize the memory with the precise object size to
+ * avoid overwriting the SLAB redzone. This disables initialization in
+ * the arch code and may thus lead to performance penalty. The penalty
+ * is accepted since SLAB redzones aren't enabled in production builds.
+ */
+ if (__slub_debug_enabled() &&
+ init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+ init = false;
+ memzero_explicit((void *)addr, size);
+ }
size = round_up(size, KASAN_GRANULE_SIZE);
hw_set_mem_tag_range((void *)addr, size, tag, init);
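For context on the kasan.h hunk: kasan_unpoison() rounds size up to KASAN_GRANULE_SIZE before handing it to hw_set_mem_tag_range(), so letting the arch code initialize the rounded range would also clear the bytes the SLUB redzone occupies in the final granule. The standalone sketch below, assuming a 16-byte granule, a hypothetical 41-byte object, and a simplified round_up(), only illustrates how many trailing bytes the rounding would have touched:

#include <stdio.h>

#define KASAN_GRANULE_SIZE	16UL
#define round_up(x, y)		((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Hypothetical object size that is not granule aligned. */
	unsigned long size = 41;
	unsigned long rounded = round_up(size, KASAN_GRANULE_SIZE);

	/*
	 * Tagging works at granule granularity, so an in-place hardware
	 * init would write rounded - size bytes past the object -- the
	 * start of the SLUB redzone.
	 */
	printf("object=%lu rounded=%lu redzone bytes touched=%lu\n",
	       size, rounded, rounded - size);
	return 0;
}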
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b97e17806be..3e97e68aef7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
@@ -5221,9 +5221,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
- if (unlikely(nr_pages <= 0))
- return 0;
-
/*
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
@@ -5231,19 +5228,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* No pages requested? */
+ if (unlikely(nr_pages <= 0))
+ goto out;
+
/* Already populated array? */
if (unlikely(page_array && nr_pages - nr_populated == 0))
- return nr_populated;
+ goto out;
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * PAGE_OWNER may recurse into the allocator to allocate space to
+ * save the stack with pagesets.lock held. Releasing/reacquiring
+ * removes much of the performance benefit of bulk allocation so
+ * force the caller to allocate one page at a time as it'll have
+ * similar performance to added complexity to the bulk allocator.
+ */
+ if (static_branch_unlikely(&page_owner_inited))
+ goto failed;
+#endif
+
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
- return 0;
+ goto out;
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+out:
return nr_populated;
failed_irq:
@@ -5326,7 +5340,7 @@ failed:
nr_populated++;
}
- return nr_populated;
+ goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
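The page_alloc.c changes make every early exit of __alloc_pages_bulk() return nr_populated, i.e. the number of array entries already holding pages, rather than 0. A hedged sketch of how a caller might consume that return value follows; fill_page_array() is a hypothetical helper and it assumes the alloc_pages_bulk_array() wrapper (local node, NULL page list):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: fill a possibly pre-populated page array. The
 * return value counts all populated leading entries, so compare it
 * against nr_pages instead of testing for 0.
 */
static int fill_page_array(struct page **pages, unsigned int nr_pages)
{
	unsigned long populated;

	populated = alloc_pages_bulk_array(GFP_KERNEL, nr_pages, pages);
	while (populated < nr_pages) {
		/* Fall back to single-page allocation for the remainder. */
		pages[populated] = alloc_page(GFP_KERNEL);
		if (!pages[populated])
			return -ENOMEM;
		populated++;
	}
	return 0;
}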
diff --git a/mm/slab.h b/mm/slab.h
index 67e06637ff2e..f997fd5e42c8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
#endif
/*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
*/
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
- VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
- if (static_branch_unlikely(&slub_debug_enabled))
+ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+ VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+ if (__slub_debug_enabled())
return s->flags & flags;
-#endif
return false;
}
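The slab.h hunk moves __slub_debug_enabled() out of slub.c and pairs it with a stub returning a compile-time false when CONFIG_SLUB_DEBUG is off, which is what lets kmem_cache_debug_flags() (and the new kasan_unpoison() check) drop their #ifdef blocks. A small sketch of the same pattern, with a hypothetical CONFIG_FOO_DEBUG option and foo_debug_key:

#include <linux/bug.h>
#include <linux/jump_label.h>

#ifdef CONFIG_FOO_DEBUG
DECLARE_STATIC_KEY_FALSE(foo_debug_key);

static inline bool foo_debug_enabled(void)
{
	return static_branch_unlikely(&foo_debug_key);
}
#else
static inline bool foo_debug_enabled(void)
{
	return false;	/* constant false: callers' branches are compiled out */
}
#endif

static inline bool foo_debug_flags(unsigned int flags, unsigned int mask)
{
	/* IS_ENABLED() replaces the #ifdef without hiding code from the compiler. */
	if (IS_ENABLED(CONFIG_FOO_DEBUG))
		WARN_ON_ONCE(!(flags & mask));
	if (foo_debug_enabled())
		return flags & mask;
	return false;
}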
diff --git a/mm/slub.c b/mm/slub.c
index dc863c1ea324..e1644ac6ee7b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -120,25 +120,11 @@
*/
#ifdef CONFIG_SLUB_DEBUG
-
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-
-static inline bool __slub_debug_enabled(void)
-{
- return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLUB_DEBUG */
static inline bool kmem_cache_debug(struct kmem_cache *s)
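The remaining slub.c hunk leaves only the static-key definition behind. As a reminder of how such keys are typically used, here is a hedged, self-contained sketch with hypothetical names (foo_feature_enabled, foo_feature_setup); the default-false key keeps the branch patched out until something enables it at runtime:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key, default off: the branch below starts out as a NOP. */
DEFINE_STATIC_KEY_FALSE(foo_feature_enabled);

static int __init foo_feature_setup(char *str)
{
	/* Flipping the key patches every branch site in place. */
	static_branch_enable(&foo_feature_enabled);
	return 1;
}
__setup("foo_feature", foo_feature_setup);

static void foo_hot_path(void)
{
	if (static_branch_unlikely(&foo_feature_enabled))
		pr_info("debug-only slow path\n");
}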