Diffstat (limited to 'mm')
-rw-r--r--  mm/kmemleak.c    12
-rw-r--r--  mm/memcontrol.c   6
-rw-r--r--  mm/page_owner.c   6
-rw-r--r--  mm/slab.c         1
-rw-r--r--  mm/vmstat.c       2
5 files changed, 16 insertions, 11 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e83987c55a08..46c2290a08f1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1657,8 +1657,7 @@ static void start_scan_thread(void)
}
/*
- * Stop the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
+ * Stop the automatic memory scanning thread.
*/
static void stop_scan_thread(void)
{
@@ -1921,12 +1920,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
{
stop_scan_thread();
+ mutex_lock(&scan_mutex);
/*
- * Once the scan thread has stopped, it is safe to no longer track
- * object freeing. Ordering of the scan thread stopping and the memory
- * accesses below is guaranteed by the kthread_stop() function.
+ * Once kmemleak_scan is guaranteed to have stopped running, it is
+ * safe to no longer track object freeing. Ordering of the scan
+ * thread stopping and the memory accesses below is guaranteed by
+ * the kthread_stop() function.
*/
kmemleak_free_enabled = 0;
+ mutex_unlock(&scan_mutex);
if (!kmemleak_found_leaks)
__kmemleak_do_cleanup();
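
Note on the kmemleak change: stop_scan_thread() only stops the scanning kthread, while a scan triggered through the debugfs interface runs kmemleak_scan() with scan_mutex held. Taking scan_mutex before clearing kmemleak_free_enabled therefore waits out any scan already in flight. A minimal sketch of that pattern (hypothetical names, not kmemleak's actual code):

    static DEFINE_MUTEX(scan_mutex);
    static int free_tracking = 1;    /* stands in for kmemleak_free_enabled */

    static void do_scan(void)        /* hypothetical scanner */
    {
            mutex_lock(&scan_mutex);
            /* walk tracked objects; assumes free_tracking stays set */
            mutex_unlock(&scan_mutex);
    }

    static void do_cleanup(void)     /* hypothetical cleanup path */
    {
            mutex_lock(&scan_mutex); /* waits for any in-flight scan */
            free_tracking = 0;
            mutex_unlock(&scan_mutex);
    }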
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 670e99b68aa6..9ec024b862ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -714,9 +714,9 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
* invocations for reference counting, or use mem_cgroup_iter_break()
* to cancel a hierarchy walk before the round-trip is complete.
*
- * Reclaimers can specify a zone and a priority level in @reclaim to
+ * Reclaimers can specify a node and a priority level in @reclaim to
* divide up the memcgs in the hierarchy among all concurrent
- * reclaimers operating on the same zone and priority.
+ * reclaimers operating on the same node and priority.
*/
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -2299,7 +2299,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
}
/**
- * memcg_kmem_charge: charge a kmem page
+ * memcg_kmem_charge_memcg: charge a kmem page
* @page: page to charge
* @gfp: reclaim mode
* @order: allocation order
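
Note on the memcontrol changes: both are documentation-only. The iterator comment predates the move of reclaim from zones to nodes, and the kernel-doc header named the wrong function. For reference, the partial walk this comment documents follows the pattern given elsewhere in memcontrol.c; in this sketch, done is a hypothetical exit condition:

    struct mem_cgroup *iter;

    for (iter = mem_cgroup_iter(root, NULL, NULL);
         iter != NULL;
         iter = mem_cgroup_iter(root, iter, NULL)) {
            /* ... reclaim from iter ... */
            if (done) {
                    mem_cgroup_iter_break(root, iter);
                    break;
            }
    }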
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9886c6073828..7172e0a80e13 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
static inline bool check_recursive_alloc(struct stack_trace *trace,
unsigned long ip)
{
- int i, count;
+ int i;
if (!trace->nr_entries)
return false;
- for (i = 0, count = 0; i < trace->nr_entries; i++) {
- if (trace->entries[i] == ip && ++count == 2)
+ for (i = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == ip)
return true;
}
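
Note on the page_owner change: check_recursive_alloc() no longer counts occurrences and waits for ip to appear twice; any occurrence of ip in the captured trace now counts as recursion. A standalone restatement of the new predicate (hypothetical helper, not the kernel function):

    static bool trace_contains_ip(const unsigned long *entries,
                                  unsigned int nr, unsigned long ip)
    {
            unsigned int i;

            for (i = 0; i < nr; i++)
                    if (entries[i] == ip)
                            return true;
            return false;
    }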
diff --git a/mm/slab.c b/mm/slab.c
index 324446621b3e..9095c3945425 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN, 0, 0);
list_add(&kmem_cache->list, &slab_caches);
+ memcg_link_cache(kmem_cache);
slab_state = PARTIAL;
/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2db6db6b1..33581be705f0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1839,9 +1839,11 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
+ preempt_disable();
queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
+ preempt_enable();
}
}
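
Note on the vmstat change: smp_processor_id() and this_cpu_ptr() are only stable while preemption is off, and calling smp_processor_id() from a preemptible section trips a warning under CONFIG_DEBUG_PREEMPT. Disabling preemption keeps the CPU identity fixed across both calls so vmstat_work is requeued on the CPU it belongs to. A sketch of an equivalent idiom using get_cpu()/put_cpu(), which wrap the same preempt_disable()/preempt_enable() pair:

    int cpu = get_cpu();    /* implies preempt_disable() */

    queue_delayed_work_on(cpu, mm_percpu_wq,
                          per_cpu_ptr(&vmstat_work, cpu),
                          round_jiffies_relative(sysctl_stat_interval));
    put_cpu();              /* implies preempt_enable() */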