| author | Jiri Kosina <jkosina@suse.cz> | 2020-10-15 20:37:01 +0200 | 
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2020-10-15 20:37:01 +0200 | 
| commit | 62b31a045757eac81fed94b19df47418a0818528 (patch) | |
| tree | 285fda56df8304dff6ba929bad65ddfb4e4fd726 /mm/page_alloc.c | |
| parent | cc51d171776f3a6acb6828bad0b780a4cacf5423 (diff) | |
| parent | c27e08820bc6cb7d483a8d87589bdbbbf10f2306 (diff) | |
Merge branch 'for-5.10/core' into for-linus
- nonblocking read semantics fix for hid-debug
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 26 | 
1 file changed, 18 insertions, 8 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 167732f4d124..fab5e97dc9ca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -666,8 +666,6 @@ void prep_compound_page(struct page *page, unsigned int order)
 	int i;
 	int nr_pages = 1 << order;
 
-	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
-	set_compound_order(page, order);
 	__SetPageHead(page);
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
@@ -675,6 +673,9 @@ void prep_compound_page(struct page *page, unsigned int order)
 		p->mapping = TAIL_MAPPING;
 		set_compound_head(p, page);
 	}
+
+	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+	set_compound_order(page, order);
 	atomic_set(compound_mapcount_ptr(page), -1);
 	if (hpage_pincount_available(page))
 		atomic_set(compound_pincount_ptr(page), 0);
@@ -1301,6 +1302,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	struct page *page, *tmp;
 	LIST_HEAD(head);
 
+	/*
+	 * Ensure proper count is passed which otherwise would stuck in the
+	 * below while (list_empty(list)) loop.
+	 */
+	count = min(pcp->count, count);
 	while (count) {
 		struct list_head *list;
 
@@ -4282,7 +4288,7 @@ retry:
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
-	 * Shrink them them and try again
+	 * Shrink them and try again
 	 */
 	if (!page && !drained) {
 		unreserve_highatomic_pageblock(ac, false);
@@ -6192,7 +6198,7 @@ static int zone_batchsize(struct zone *zone)
  * locking.
  *
  * Any new users of pcp->batch and pcp->high should ensure they can cope with
- * those fields changing asynchronously (acording the the above rule).
+ * those fields changing asynchronously (acording to the above rule).
  *
  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  * outside of boot time (or some other assurance that no concurrent updaters
@@ -7887,7 +7893,7 @@ int __meminit init_per_zone_wmark_min(void)
 
 	return 0;
 }
-core_initcall(init_per_zone_wmark_min)
+postcore_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
@@ -8203,7 +8209,7 @@ void *__init alloc_large_system_hash(const char *tablename,
  * race condition. So you can't expect this function should be exact.
  *
  * Returns a page without holding a reference. If the caller wants to
- * dereference that page (e.g., dumping), it has to make sure that that it
+ * dereference that page (e.g., dumping), it has to make sure that it
  * cannot get removed (e.g., via memory unplug) concurrently.
  *
  */
@@ -8347,6 +8353,10 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	unsigned long pfn = start;
 	unsigned int tries = 0;
 	int ret = 0;
+	struct migration_target_control mtc = {
+		.nid = zone_to_nid(cc->zone),
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
 	migrate_prep();
 
@@ -8373,8 +8383,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 							&cc->migratepages);
 		cc->nr_migratepages -= nr_reclaimed;
 
-		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-				    NULL, 0, cc->mode, MR_CONTIG_RANGE);
+		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
+				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
 	}
 	if (ret < 0) {
 		putback_movable_pages(&cc->migratepages);
```
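The free_pcppages_bulk() hunk above clamps the caller-supplied count to pcp->count so the while (count) loop cannot keep spinning once the per-cpu lists are empty. Below is a minimal, self-contained C sketch of that clamping pattern; the pcp_list, pop() and drain() names are hypothetical stand-ins for illustration, not the kernel code itself.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for a per-CPU page list; not kernel structures. */
struct node {
	struct node *next;
};

struct pcp_list {
	struct node *head;
	int count;	/* number of nodes actually on the list */
};

static struct node *pop(struct pcp_list *l)
{
	struct node *n = l->head;

	l->head = n->next;
	l->count--;
	return n;
}

/*
 * Drain up to 'count' nodes. Without the clamp, a caller passing a count
 * larger than l->count would keep looping after the list has gone empty.
 */
static int drain(struct pcp_list *l, int count)
{
	int freed = 0;

	count = count < l->count ? count : l->count;	/* the clamp */
	while (count) {
		pop(l);
		freed++;
		count--;
	}
	return freed;
}

int main(void)
{
	struct node n1 = { NULL };
	struct node n0 = { &n1 };
	struct pcp_list l = { &n0, 2 };

	/* Request more than is available; the clamp keeps the loop bounded. */
	printf("freed %d\n", drain(&l, 5));	/* prints: freed 2 */
	return 0;
}
```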
