Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 38c3fa4308e2..5341ae93861f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -373,9 +373,16 @@ static void __lru_cache_activate_page(struct page *page)
 void mark_page_accessed(struct page *page)
 {
 	page = compound_head(page);
-	if (!PageActive(page) && !PageUnevictable(page) &&
-			PageReferenced(page)) {
+	if (!PageReferenced(page)) {
+		SetPageReferenced(page);
+	} else if (PageUnevictable(page)) {
+		/*
+		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
+		 * this list is never rotated or maintained, so marking an
+		 * unevictable page accessed has no effect.
+		 */
+	} else if (!PageActive(page)) {
 		/*
 		 * If the page is on the LRU, queue it for activation via
 		 * activate_page_pvecs. Otherwise, assume the page is on a
@@ -389,8 +396,6 @@ void mark_page_accessed(struct page *page)
 		ClearPageReferenced(page);
 		if (page_is_file_cache(page))
 			workingset_activation(page);
-	} else if (!PageReferenced(page)) {
-		SetPageReferenced(page);
 	}
 	if (page_is_idle(page))
 		clear_page_idle(page);
@@ -708,9 +713,10 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
  */
 void lru_add_drain_all(void)
 {
+	static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
-	int cpu;
+	int cpu, seq;
 
 	/*
 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
@@ -719,7 +725,19 @@ void lru_add_drain_all(void)
 	if (WARN_ON(!mm_percpu_wq))
 		return;
 
+	seq = raw_read_seqcount_latch(&seqcount);
+
 	mutex_lock(&lock);
+
+	/*
+	 * Piggyback on a drain that started and finished while we waited
+	 * for the lock: all pages pending at the time we entered have
+	 * already been drained from the vectors.
+	 */
+	if (__read_seqcount_retry(&seqcount, seq))
+		goto done;
+
+	raw_write_seqcount_latch(&seqcount);
+
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
@@ -740,6 +758,7 @@ void lru_add_drain_all(void)
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
 
+done:
 	mutex_unlock(&lock);
 }
 #else
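
The mark_page_accessed() hunk reorders the ladder into promotion order: a first touch merely sets PG_referenced, a second touch on an unevictable page is deliberately ignored (LRU_UNEVICTABLE is never scanned), and a second touch on an evictable inactive page queues the page for activation. A minimal userspace sketch of the same two-touch state machine, where struct page_state, touch() and promote() are hypothetical stand-ins for the page flags and pagevec machinery:

#include <stdbool.h>

struct page_state {
	bool referenced;	/* PG_referenced analogue: one prior touch */
	bool active;		/* PG_active analogue: on the active list */
	bool unevictable;	/* PG_unevictable analogue: never scanned */
};

/* Hypothetical stand-in for queueing the page on activate_page_pvecs. */
static void promote(struct page_state *p)
{
	p->active = true;
	p->referenced = false;
}

void touch(struct page_state *p)
{
	if (!p->referenced) {
		/* First access: only remember that it happened. */
		p->referenced = true;
	} else if (p->unevictable) {
		/* The unevictable list is never rotated; nothing to do. */
	} else if (!p->active) {
		/* Second access to an evictable inactive page: activate. */
		promote(p);
	}
}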
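
The lru_add_drain_all() hunk uses the seqcount as a generation counter: a caller snapshots it before sleeping on the mutex, and if the count has moved by the time the lock is acquired, a full drain started after the snapshot and finished before the lock handover, so the caller can return without redraining. A minimal userspace sketch of the same piggyback pattern, assuming C11 atomics and a pthread mutex in place of the kernel's seqcount_t (drain_gen, do_drain() and drain_all() are hypothetical names):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint drain_gen;	/* bumped once per drain, under the lock */

/* Hypothetical stand-in for flushing the per-CPU vectors; stubbed here. */
static void do_drain(void)
{
}

void drain_all(void)
{
	/* Snapshot the generation before waiting on the lock. */
	unsigned int seq = atomic_load(&drain_gen);

	pthread_mutex_lock(&drain_lock);

	/*
	 * If the generation moved, a drain was both started and finished
	 * after our snapshot: everything queued before we entered has
	 * already been flushed, so piggyback on it and return.
	 */
	if (atomic_load(&drain_gen) != seq)
		goto done;

	/* Publish the new generation before draining, as the patch does. */
	atomic_fetch_add(&drain_gen, 1);
	do_drain();
done:
	pthread_mutex_unlock(&drain_lock);
}

Bumping the counter before the drain rather than after is what makes the skip safe: a changed count proves the concurrent drain began after our snapshot, so it covered every page pending at the time we entered.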
