author    Alexander Duyck <alexander.h.duyck@linux.intel.com>  2019-05-13 17:21:17 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 09:47:49 -0700
commit    837566e7e08e3f89444166444836a8a49b9f9322
tree      189436a67634ff14180bba2925e1a8ef4fcda877
parent    56ec43d8b02719402c9fcf984feb52ec2300f8a5
mm: implement new zone specific memblock iterator
Introduce a new iterator, for_each_free_mem_pfn_range_in_zone. This
iterator takes care of making sure a given memory range is in fact
contained within a zone. It takes care of all the bounds checking we
were doing in deferred_grow_zone and deferred_init_memmap. In addition,
it should speed up the search a bit, since it skips ranges until the end
of a range is greater than the start of the zone's pfn range, and exits
completely once the start of a range is beyond the end of the zone.

Link: http://lkml.kernel.org/r/20190405221225.12227.22573.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <yi.z.zhang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
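The iterator itself is introduced outside this file (the diffstat below
is limited to mm/page_alloc.c), so its definition is not visible in this
view. As a rough sketch of its shape, assuming a memblock helper
__next_mem_pfn_range_in_zone() that advances to the next free range
clamped to the zone and sets the cursor to U64_MAX when done:

/*
 * Sketch only -- the real definition is not part of this diff. Each
 * step yields the next free memblock range, clamped to the zone's pfn
 * bounds, and sets i to U64_MAX once iteration is complete.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))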
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 31 ++++++++++++-------------------
1 file changed, 12 insertions(+), 19 deletions(-)
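To make the bounds checking described above concrete, here is a minimal
sketch of what the clamping helper could look like, built on memblock's
existing __next_mem_range() walker; the names and details are
illustrative, since the actual implementation is outside this
diffstat-limited view:

/*
 * Illustrative sketch, not code from this diff: walk memblock's free
 * ranges on the zone's node and report only the part of each range
 * that overlaps the zone, stopping early once past the zone's end.
 */
static void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn,
			     unsigned long *out_epfn)
{
	int nid = zone_to_nid(zone);
	phys_addr_t spa, epa;

	__next_mem_range(idx, nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);

	while (*idx != U64_MAX) {
		unsigned long spfn = PFN_UP(spa);
		unsigned long epfn = PFN_DOWN(epa);

		/* Need a range ending past the zone start that still
		 * spans at least one full pfn after rounding. */
		if (epfn > zone->zone_start_pfn && spfn < epfn) {
			/* Past the end of the zone: stop searching. */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}
			/* Clamp the overlap to the zone boundaries. */
			*out_spfn = max(zone->zone_start_pfn, spfn);
			*out_epfn = min(zone_end_pfn(zone), epfn);
			return;
		}

		/* Range lies below the zone; advance to the next one. */
		__next_mem_range(idx, nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
	}
}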
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 25b82be438d7..fd42321c02f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1631,11 +1631,9 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
static int __init deferred_init_memmap(void *data)
{
pg_data_t *pgdat = data;
- int nid = pgdat->node_id;
unsigned long start = jiffies;
unsigned long nr_pages = 0;
unsigned long spfn, epfn, first_init_pfn, flags;
- phys_addr_t spa, epa;
int zid;
struct zone *zone;
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
@@ -1672,14 +1670,12 @@ static int __init deferred_init_memmap(void *data)
* freeing pages we can access pages that are ahead (computing buddy
* page in __free_one_page()).
*/
- for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
- spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
- epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+ for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
+ spfn = max_t(unsigned long, first_init_pfn, spfn);
nr_pages += deferred_init_pages(zone, spfn, epfn);
}
- for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
- spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
- epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+ for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
+ spfn = max_t(unsigned long, first_init_pfn, spfn);
deferred_free_pages(spfn, epfn);
}
pgdat_resize_unlock(pgdat, &flags);
@@ -1687,8 +1683,8 @@ static int __init deferred_init_memmap(void *data)
/* Sanity check that the next zone really is unpopulated */
WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
- pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
- jiffies_to_msecs(jiffies - start));
+ pr_info("node %d initialised, %lu pages in %ums\n",
+ pgdat->node_id, nr_pages, jiffies_to_msecs(jiffies - start));
pgdat_init_report_one_done();
return 0;
@@ -1712,13 +1708,11 @@ static int __init deferred_init_memmap(void *data)
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
- int nid = zone_to_nid(zone);
- pg_data_t *pgdat = NODE_DATA(nid);
unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
+ pg_data_t *pgdat = zone->zone_pgdat;
unsigned long nr_pages = 0;
unsigned long first_init_pfn, spfn, epfn, t, flags;
unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
- phys_addr_t spa, epa;
u64 i;
/* Only the last zone may have deferred pages */
@@ -1754,9 +1748,8 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
return false;
}
- for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
- spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
- epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+ for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
+ spfn = max_t(unsigned long, first_init_pfn, spfn);
while (spfn < epfn && nr_pages < nr_pages_needed) {
t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
@@ -1770,9 +1763,9 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
break;
}
- for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
- spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
- epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
+ for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
+ spfn = max_t(unsigned long, first_init_pfn, spfn);
+ epfn = min_t(unsigned long, first_deferred_pfn, epfn);
deferred_free_pages(spfn, epfn);
if (first_deferred_pfn == epfn)