 mm/memcontrol.c | 49 ++++++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5b1df2326f7..352aaaeaa536 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1779,13 +1779,14 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
  */
 #define NR_MEMCG_STOCK 7
 struct memcg_stock_pcp {
-	local_trylock_t stock_lock;
+	local_trylock_t memcg_lock;
 	uint8_t nr_pages[NR_MEMCG_STOCK];
 	struct mem_cgroup *cached[NR_MEMCG_STOCK];
 
+	local_trylock_t obj_lock;
+	unsigned int nr_bytes;
 	struct obj_cgroup *cached_objcg;
 	struct pglist_data *cached_pgdat;
-	unsigned int nr_bytes;
 	int nr_slab_reclaimable_b;
 	int nr_slab_unreclaimable_b;
 
@@ -1794,7 +1795,8 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
+	.memcg_lock = INIT_LOCAL_TRYLOCK(memcg_lock),
+	.obj_lock = INIT_LOCAL_TRYLOCK(obj_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1822,7 +1824,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	int i;
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+	    !local_trylock_irqsave(&memcg_stock.memcg_lock, flags))
 		return ret;
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -1839,7 +1841,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		break;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 
 	return ret;
 }
@@ -1885,19 +1887,19 @@ static void drain_local_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	/*
-	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
-	 * drain_stock races is that we always operate on local CPU stock
-	 * here with IRQ disabled
-	 */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (WARN_ONCE(!in_task(), "drain in non-task context"))
+		return;
 
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
+
+	local_lock_irqsave(&memcg_stock.memcg_lock, flags);
+	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock_fully(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
-
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -1920,10 +1922,10 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+	    !local_trylock_irqsave(&memcg_stock.memcg_lock, flags)) {
 		/*
 		 * In case of larger than batch refill or unlikely failure to
-		 * lock the percpu stock_lock, uncharge memcg directly.
+		 * lock the percpu memcg_lock, uncharge memcg directly.
 		 */
 		memcg_uncharge(memcg, nr_pages);
 		return;
@@ -1955,7 +1957,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 }
 
 static bool is_drain_needed(struct memcg_stock_pcp *stock,
@@ -2030,11 +2032,12 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
 	stock = &per_cpu(memcg_stock, cpu);
 
-	/* drain_obj_stock requires stock_lock */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	/* drain_obj_stock requires obj_lock */
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 	drain_obj_stock(stock);
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
+	/* no need for the local lock */
 	drain_stock_fully(stock);
 
 	return 0;
@@ -2887,7 +2890,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2898,7 +2901,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
 	return ret;
 }
@@ -2987,7 +2990,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -3009,7 +3012,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
 	if (nr_pages)
 		obj_cgroup_uncharge_pages(objcg, nr_pages);
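
The pattern the patch moves to is one per-CPU structure guarded by two independent locks: memcg_lock for the page-charge cache (nr_pages[]/cached[]) and obj_lock for the objcg byte cache (nr_bytes/cached_objcg), so the two fast paths no longer serialize against each other. The sketch below is only a userspace analogy, not kernel code: struct stock_pcp, consume_pages() and refill_bytes() are made-up names, and POSIX mutexes stand in for the kernel's local_trylock_t.

/* Illustrative analogy only: two try-locks guarding disjoint halves of one cache. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_STOCK 7

struct stock_pcp {
	pthread_mutex_t memcg_lock;	/* guards nr_pages[] and cached[] */
	uint8_t nr_pages[NR_STOCK];
	void *cached[NR_STOCK];

	pthread_mutex_t obj_lock;	/* guards nr_bytes and cached_objcg */
	unsigned int nr_bytes;
	void *cached_objcg;
};

/* Fast path: consume cached pages only if the lock is uncontended. */
static bool consume_pages(struct stock_pcp *s, unsigned int want)
{
	bool ret = false;

	if (pthread_mutex_trylock(&s->memcg_lock))
		return false;	/* contended: caller falls back to charging directly */
	if (s->nr_pages[0] >= want) {
		s->nr_pages[0] -= want;
		ret = true;
	}
	pthread_mutex_unlock(&s->memcg_lock);
	return ret;
}

/* Byte accounting takes only obj_lock, so it never contends with consume_pages(). */
static void refill_bytes(struct stock_pcp *s, unsigned int nr_bytes)
{
	pthread_mutex_lock(&s->obj_lock);
	s->nr_bytes += nr_bytes;
	pthread_mutex_unlock(&s->obj_lock);
}

int main(void)
{
	struct stock_pcp s = {
		.memcg_lock = PTHREAD_MUTEX_INITIALIZER,
		.obj_lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_pages = { 32 },
	};

	refill_bytes(&s, 512);
	printf("consumed: %d\n", consume_pages(&s, 8));
	return 0;
}

With a single lock, a byte-accounting update would have made the page-charge trylock fail and forced the slow path; with two locks, each path only ever waits on updates to its own fields.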