From 7a16d7c7619b26e5e9bfc1360de0d02a94f7d9eb Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Wed, 15 Mar 2023 14:31:31 +0300
Subject: mm/slub: fix MAX_ORDER usage in calculate_order()

MAX_ORDER is not inclusive: the maximum allocation order the buddy
allocator can deliver is MAX_ORDER-1.

Fix MAX_ORDER usage in calculate_order().

Link: https://lkml.kernel.org/r/20230315113133.11326-9-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Cc: Vlastimil Babka
Cc: Christoph Lameter
Cc: Pekka Enberg
Signed-off-by: Andrew Morton
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 39327e98fce3..32eb6b50fe18 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4171,7 +4171,7 @@ static inline int calculate_order(unsigned int size)
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = calc_slab_order(size, 1, MAX_ORDER, 1);
+	order = calc_slab_order(size, 1, MAX_ORDER - 1, 1);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
--
cgit

From 23baf831a32c04f9a968812511540b1b3e648bf5 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Wed, 15 Mar 2023 14:31:33 +0300
Subject: mm, treewide: redefine MAX_ORDER sanely

MAX_ORDER is currently defined as the number of orders the page allocator
supports: the user can ask the buddy allocator for page orders between 0
and MAX_ORDER-1.

This definition is counter-intuitive and has led to a number of bugs all
over the kernel.

Change the definition of MAX_ORDER to be inclusive: the range of orders
the user can ask from the buddy allocator is now 0..MAX_ORDER.

[kirill@shutemov.name: fix min() warning]
Link: https://lkml.kernel.org/r/20230315153800.32wib3n5rickolvh@box
[akpm@linux-foundation.org: fix another min_t warning]
[kirill@shutemov.name: fixups per Zi Yan]
Link: https://lkml.kernel.org/r/20230316232144.b7ic4cif4kjiabws@box.shutemov.name
[akpm@linux-foundation.org: fix underlining in docs]
Link: https://lore.kernel.org/oe-kbuild-all/202303191025.VRCTk6mP-lkp@intel.com/
Link: https://lkml.kernel.org/r/20230315113133.11326-11-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Reviewed-by: Michael Ellerman [powerpc]
Cc: "Kirill A. Shutemov"
Cc: Zi Yan
Signed-off-by: Andrew Morton
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 32eb6b50fe18..f49d669ff604 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4171,8 +4171,8 @@ static inline int calculate_order(unsigned int size)
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = calc_slab_order(size, 1, MAX_ORDER - 1, 1);
-	if (order < MAX_ORDER)
+	order = calc_slab_order(size, 1, MAX_ORDER, 1);
+	if (order <= MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -4697,7 +4697,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, (int *)&slub_max_order);
-	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
+	slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);

 	return 1;
 }
--
cgit
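Taken together, the two patches above track one semantic change: under the old
convention MAX_ORDER itself was never a valid allocation order, while under the
new one it is. A minimal standalone sketch of how a bounds check reads under
each convention (the function names and the max_order parameter below are made
up for illustration; only the comparison operators mirror the kernel change):

    #include <stdbool.h>

    /* Old convention: MAX_ORDER is exclusive, valid orders are 0..MAX_ORDER-1. */
    static bool order_valid_exclusive(unsigned int order, unsigned int max_order)
    {
            return order < max_order;       /* mirrors "if (order < MAX_ORDER)" */
    }

    /* New convention: MAX_ORDER is inclusive, valid orders are 0..MAX_ORDER. */
    static bool order_valid_inclusive(unsigned int order, unsigned int max_order)
    {
            return order <= max_order;      /* mirrors "if (order <= MAX_ORDER)" */
    }

The first patch exists because calculate_order() was written as if the old
MAX_ORDER were inclusive; the second patch then makes that reading the official
one treewide.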
From c7b23b68e2aa93f86a206222d23ccd9a21f5982a Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Thu, 13 Apr 2023 10:40:34 +0000
Subject: mm: vmscan: refactor updating current->reclaim_state

During reclaim, we keep track of pages reclaimed by means other than
LRU-based reclaim through scan_control->reclaim_state->reclaimed_slab,
which we stash a pointer to in the current task_struct.

However, we keep track of more than just reclaimed slab pages through this.
We also use it for clean file pages dropped through pruned inodes, and for
freed xfs buffer pages.

Rename reclaimed_slab to reclaimed, and add a helper function that wraps
updating it through current, so that future changes to this logic are
contained within include/linux/swap.h.

Link: https://lkml.kernel.org/r/20230413104034.1086717-4-yosryahmed@google.com
Signed-off-by: Yosry Ahmed
Acked-by: Michal Hocko
Cc: Alexander Viro
Cc: Christoph Lameter
Cc: Darrick J. Wong
Cc: Dave Chinner
Cc: David Hildenbrand
Cc: David Rientjes
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Johannes Weiner
Cc: Joonsoo Kim
Cc: Matthew Wilcox
Cc: Miaohe Lin
Cc: NeilBrown
Cc: Peter Xu
Cc: Roman Gushchin
Cc: Shakeel Butt
Cc: Tim Chen
Cc: Vlastimil Babka
Cc: Yu Zhao
Signed-off-by: Andrew Morton
---
 mm/slub.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index f49d669ff604..2728d5ae4dc0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -11,7 +11,7 @@
  */

 #include <linux/mm.h>
-#include <linux/swap.h>	/* struct reclaim_state */
+#include <linux/swap.h>	/* mm_account_reclaimed_pages() */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -2063,8 +2063,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	/* Make the mapping reset visible before clearing the flag */
 	smp_wmb();
 	__folio_clear_slab(folio);
-	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += pages;
+	mm_account_reclaimed_pages(pages);
 	unaccount_slab(slab, order, s);
 	__free_pages(&folio->page, order);
 }
--
cgit
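The helper itself lands in include/linux/swap.h and is outside this
mm/slub.c-filtered view. Going by the open-coded sequence removed from
__free_slab() and the rename described in the commit message, it presumably
reduces to something like the sketch below (the parameter type is an
assumption, not quoted from the header):

    /*
     * Sketch of the wrapper described above; the authoritative version lives
     * in include/linux/swap.h. Callers no longer need to know that the
     * counter hangs off current->reclaim_state.
     */
    static inline void mm_account_reclaimed_pages(unsigned long pages)
    {
            if (current->reclaim_state)
                    current->reclaim_state->reclaimed += pages;
    }

With the update funneled through one helper, later changes to how reclaimed
pages are accounted only have to touch that header, which is exactly the
containment the commit message asks for.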