Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c      14
-rw-r--r--  mm/memcontrol.c  126
-rw-r--r--  mm/memory.c       48
-rw-r--r--  mm/page_alloc.c   12
-rw-r--r--  mm/page_cgroup.c  24
-rw-r--r--  mm/rmap.c          5
-rw-r--r--  mm/slab.c         15
-rw-r--r--  mm/slub.c         12
-rw-r--r--  mm/swapfile.c     16
-rw-r--r--  mm/vmscan.c        4
10 files changed, 183 insertions(+), 93 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 282df0a09e6f..d2a9ce952768 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -536,11 +536,15 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
return kzalloc(size, GFP_NOWAIT);
#ifdef CONFIG_HAVE_ARCH_BOOTMEM
- bootmem_data_t *p_bdata;
-
- p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
- if (p_bdata)
- return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+ {
+ bootmem_data_t *p_bdata;
+
+ p_bdata = bootmem_arch_preferred_node(bdata, size, align,
+ goal, limit);
+ if (p_bdata)
+ return alloc_bootmem_core(p_bdata, size, align,
+ goal, limit);
+ }
#endif
return NULL;
}
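The braces added here are the whole point of the hunk: the function already executes a statement (the early return kzalloc(...)) before the #ifdef CONFIG_HAVE_ARCH_BOOTMEM body declares p_bdata, and the kernel builds with -Wdeclaration-after-statement, so the declaration needs its own block scope. A minimal stand-alone illustration with a hypothetical function (gcc -std=gnu89 -Wdeclaration-after-statement):

int pick(int key)
{
	if (key < 0)
		return -1;

	int doubled = key * 2;	/* warns: declaration after a statement */
	return doubled;
}

int pick_fixed(int key)
{
	if (key < 0)
		return -1;
	{
		int doubled = key * 2;	/* fine: fresh block scope */
		return doubled;
	}
}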
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 70db6e0a5eec..e2fa20dadf40 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -45,7 +45,7 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#else
@@ -62,7 +62,8 @@ enum mem_cgroup_stat_index {
* For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
@@ -176,6 +177,9 @@ struct mem_cgroup {
unsigned int swappiness;
+ /* set when res.limit == memsw.limit */
+ bool memsw_is_minimum;
+
/*
* statistics. This must be placed at the end of memcg.
*/
@@ -188,6 +192,7 @@ enum charge_type {
MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
+ MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
NR_CHARGE_TYPE,
};
@@ -644,6 +649,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
int lru = LRU_FILE * !!file + !!active;
+ int ret;
BUG_ON(!mem_cont);
mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
@@ -661,9 +667,19 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
continue;
scan++;
- if (__isolate_lru_page(page, mode, file) == 0) {
+ ret = __isolate_lru_page(page, mode, file);
+ switch (ret) {
+ case 0:
list_move(&page->lru, dst);
+ mem_cgroup_del_lru(page);
nr_taken++;
+ break;
+ case -EBUSY:
+ /* we don't affect global LRU but rotate in our LRU */
+ mem_cgroup_rotate_lru_list(page, page_lru(page));
+ break;
+ default:
+ break;
}
}
@@ -845,6 +861,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
int ret, total = 0;
int loop = 0;
+ /* If memsw_is_minimum==1, swap-out is of-no-use. */
+ if (root_mem->memsw_is_minimum)
+ noswap = true;
+
while (loop < 2) {
victim = mem_cgroup_select_victim(root_mem);
if (victim == root_mem)
@@ -900,6 +920,44 @@ static void record_last_oom(struct mem_cgroup *mem)
mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
}
+/*
+ * Currently used to update mapped file statistics, but the routine can be
+ * generalized to update other statistics as well.
+ */
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+{
+ struct mem_cgroup *mem;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
+ int cpu;
+ struct page_cgroup *pc;
+
+ if (!page_is_file_cache(page))
+ return;
+
+ pc = lookup_page_cgroup(page);
+ if (unlikely(!pc))
+ return;
+
+ lock_page_cgroup(pc);
+ mem = pc->mem_cgroup;
+ if (!mem)
+ goto done;
+
+ if (!PageCgroupUsed(pc))
+ goto done;
+
+ /*
+ * Preemption is already disabled, we don't need get_cpu()
+ */
+ cpu = smp_processor_id();
+ stat = &mem->stat;
+ cpustat = &stat->cpustat[cpu];
+
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+done:
+ unlock_page_cgroup(pc);
+}
/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
@@ -1098,6 +1156,10 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup_per_zone *from_mz, *to_mz;
int nid, zid;
int ret = -EBUSY;
+ struct page *page;
+ int cpu;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
@@ -1118,6 +1180,23 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
+
+ page = pc->page;
+ if (page_is_file_cache(page) && page_mapped(page)) {
+ cpu = smp_processor_id();
+ /* Update mapped_file data for mem_cgroup "from" */
+ stat = &from->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ -1);
+
+ /* Update mapped_file data for mem_cgroup "to" */
+ stat = &to->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ 1);
+ }
+
if (do_swap_account)
res_counter_uncharge(&from->memsw, PAGE_SIZE);
css_put(&from->css);
@@ -1433,6 +1512,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ case MEM_CGROUP_CHARGE_TYPE_DROP:
if (page_mapped(page))
goto unlock_out;
break;
@@ -1496,18 +1576,23 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
* called after __delete_from_swap_cache() and drop "page" account.
* memcg information is recorded to swap_cgroup of "ent"
*/
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
struct mem_cgroup *memcg;
+ int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+ if (!swapout) /* this was a swap cache but the swap is unused ! */
+ ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+ memcg = __mem_cgroup_uncharge_common(page, ctype);
- memcg = __mem_cgroup_uncharge_common(page,
- MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
/* record memcg information */
- if (do_swap_account && memcg) {
+ if (do_swap_account && swapout && memcg) {
swap_cgroup_record(ent, css_id(&memcg->css));
mem_cgroup_get(memcg);
}
- if (memcg)
+ if (swapout && memcg)
css_put(&memcg->css);
}
#endif
@@ -1685,6 +1770,12 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
}
ret = res_counter_set_limit(&memcg->res, val);
+ if (!ret) {
+ if (memswlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
@@ -1703,16 +1794,14 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
return ret;
}
-int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
- unsigned long long val)
+static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
{
int retry_count;
u64 memlimit, oldusage, curusage;
int children = mem_cgroup_count_children(memcg);
int ret = -EBUSY;
- if (!do_swap_account)
- return -EINVAL;
/* see mem_cgroup_resize_res_limit */
retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
@@ -1734,6 +1823,12 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
break;
}
ret = res_counter_set_limit(&memcg->memsw, val);
+ if (!ret) {
+ if (memlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
@@ -1947,8 +2042,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- if (do_swap_account)
- val = res_counter_read_u64(&mem->memsw, name);
+ val = res_counter_read_u64(&mem->memsw, name);
break;
default:
BUG();
@@ -2046,6 +2140,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
enum {
MCS_CACHE,
MCS_RSS,
+ MCS_MAPPED_FILE,
MCS_PGPGIN,
MCS_PGPGOUT,
MCS_INACTIVE_ANON,
@@ -2066,6 +2161,7 @@ struct {
} memcg_stat_strings[NR_MCS_STAT] = {
{"cache", "total_cache"},
{"rss", "total_rss"},
+ {"mapped_file", "total_mapped_file"},
{"pgpgin", "total_pgpgin"},
{"pgpgout", "total_pgpgout"},
{"inactive_anon", "total_inactive_anon"},
@@ -2086,6 +2182,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_CACHE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+ s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653d60a6..98bcb90d5957 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
- ret = handle_mm_fault(mm, vma, start,
- foll_flags & FOLL_WRITE);
+
+ /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+ ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
@@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
struct page *page;
@@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
inc_mm_counter(mm, anon_rss);
pte = mk_pte(page, vma->vm_page_prot);
- if (write_access && reuse_swap_page(page)) {
+ if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- write_access = 0;
+ flags &= ~FAULT_FLAG_WRITE;
}
flush_icache_page(vma, page);
set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
try_to_free_swap(page);
unlock_page(page);
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@ out_page:
*/
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access)
+ unsigned int flags)
{
struct page *page;
spinlock_t *ptl;
@@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* due to the bad i386 page protection. But it's valid
* for other architectures too.
*
- * Note that if write_access is true, we either now have
+ * Note that if FAULT_FLAG_WRITE is set, we either now have
* an exclusive copy of the page, or this is a shared mapping,
* so we can make it writable and dirty to avoid having to
* handle that later.
@@ -2847,11 +2848,10 @@ unwritable_page:
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
- unsigned int flags = FAULT_FLAG_NONLINEAR |
- (write_access ? FAULT_FLAG_WRITE : 0);
pgoff_t pgoff;
+ flags |= FAULT_FLAG_NONLINEAR;
+
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
return 0;
@@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
static inline int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
- pte_t *pte, pmd_t *pmd, int write_access)
+ pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
@@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
- pte, pmd, write_access);
+ pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+ if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, entry);
} else {
/*
@@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
- if (write_access)
+ if (flags & FAULT_FLAG_WRITE)
flush_tlb_page(vma, address);
}
unlock:
@@ -2959,7 +2959,7 @@ unlock:
* By the time we get here, we already hold the mm semaphore
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access)
+ unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
@@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(PGFAULT);
if (unlikely(is_vm_hugetlb_page(vma)))
- return hugetlb_fault(mm, vma, address, write_access);
+ return hugetlb_fault(mm, vma, address, flags);
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte)
return VM_FAULT_OOM;
- return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+ return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
#ifndef __PAGETABLE_PUD_FOLDED
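The conversion above is mechanical, but it changes the contract for every caller: handle_mm_fault() now takes a flags word, of which FAULT_FLAG_WRITE is the first bit, instead of a 0/1 write_access. A hedged sketch of a converted caller (the helper is illustrative, not any particular arch's fault handler):

#include <linux/mm.h>

static int demo_do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, int is_write)
{
	/* old: handle_mm_fault(mm, vma, address, is_write) */
	unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;

	return handle_mm_fault(mm, vma, address, flags);
}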
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5f3c278c573..30d5093a099d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
@@ -487,7 +488,6 @@ static inline void __free_one_page(struct page *page,
*/
static inline void free_page_mlock(struct page *page)
{
- __ClearPageMlocked(page);
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
@@ -557,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
unsigned long flags;
int i;
int bad = 0;
- int clearMlocked = PageMlocked(page);
+ int wasMlocked = TestClearPageMlocked(page);
kmemcheck_free_shadow(page, order);
@@ -575,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
kernel_map_pages(page, 1 << order, 0);
local_irq_save(flags);
- if (unlikely(clearMlocked))
+ if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order,
@@ -1021,7 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
unsigned long flags;
- int clearMlocked = PageMlocked(page);
+ int wasMlocked = TestClearPageMlocked(page);
kmemcheck_free_shadow(page, 0);
@@ -1040,7 +1040,7 @@ static void free_hot_cold_page(struct page *page, int cold)
pcp = &zone_pcp(zone, get_cpu())->pcp;
set_page_private(page, get_pageblock_migratetype(page));
local_irq_save(flags);
- if (unlikely(clearMlocked))
+ if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_event(PGFREE);
@@ -1863,6 +1863,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct page *page;
int migratetype = allocflags_to_migratetype(gfp_mask);
+ gfp_mask &= gfp_allowed_mask;
+
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT);
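gfp_allowed_mask replaces the per-allocator slab_gfp_mask removed from slab.c and slub.c below: every allocation entry point ANDs its mask with it, so early-boot allocations silently lose __GFP_WAIT/__GFP_IO/__GFP_FS until init code widens the mask after interrupts are enabled. A user-space model of the gating; the flag values are copied from 2.6.31-era <linux/gfp.h> and are illustrative only:

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	0x10u	/* can sleep */
#define __GFP_IO	0x40u	/* can start physical I/O */
#define __GFP_FS	0x80u	/* can call into the filesystem */
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_BOOT_MASK	(~(__GFP_WAIT | __GFP_IO | __GFP_FS))

static gfp_t gfp_allowed_mask = GFP_BOOT_MASK;	/* boot-time default */

static gfp_t gate(gfp_t gfp_mask)
{
	return gfp_mask & gfp_allowed_mask;	/* what each allocator now does */
}

int main(void)
{
	printf("GFP_KERNEL at boot: %#x (WAIT/IO/FS stripped)\n",
	       gate(GFP_KERNEL));
	gfp_allowed_mask = ~0u;		/* widened once IRQs are enabled */
	printf("GFP_KERNEL later:   %#x\n", gate(GFP_KERNEL));
	return 0;
}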
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 11a8a10a3909..f22b4ebbd8dc 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -83,12 +83,12 @@ void __init page_cgroup_init_flatmem(void)
goto fail;
}
printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
- printk(KERN_INFO "please try cgroup_disable=memory option if you"
- " don't want\n");
+ printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
+ " don't want memory cgroups\n");
return;
fail:
- printk(KERN_CRIT "allocation of page_cgroup was failed.\n");
- printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
+ printk(KERN_CRIT "allocation of page_cgroup failed.\n");
+ printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
panic("Out of memory");
}
@@ -99,6 +99,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
+ if (!section->page_cgroup)
+ return NULL;
return section->page_cgroup + pfn;
}
@@ -252,14 +254,14 @@ void __init page_cgroup_init(void)
fail = init_section_page_cgroup(pfn);
}
if (fail) {
- printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
+ printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
panic("Out of memory");
} else {
hotplug_memory_notifier(page_cgroup_callback, 0);
}
printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
- printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
- " want\n");
+ printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
+ " want memory cgroups\n");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
@@ -309,8 +311,6 @@ static int swap_cgroup_prepare(int type)
struct swap_cgroup_ctrl *ctrl;
unsigned long idx, max;
- if (!do_swap_account)
- return 0;
ctrl = &swap_cgroup_ctrl[type];
for (idx = 0; idx < ctrl->length; idx++) {
@@ -347,9 +347,6 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
struct swap_cgroup *sc;
unsigned short old;
- if (!do_swap_account)
- return 0;
-
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
@@ -378,9 +375,6 @@ unsigned short lookup_swap_cgroup(swp_entry_t ent)
struct swap_cgroup *sc;
unsigned short ret;
- if (!do_swap_account)
- return 0;
-
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
sc = page_address(mappage);
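Two related cleanups here: lookup_page_cgroup() can now return NULL before the per-section arrays are allocated, and the swap_cgroup helpers drop their do_swap_account guards because callers (see mem_cgroup_uncharge_swapcache() above) now decide. The caller-side pattern the NULL return enables, as a hypothetical sketch:

#include <linux/page_cgroup.h>

static void demo_touch_page_stat(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	if (!pc)	/* section not initialized yet: nothing to account */
		return;
	/* ... lock_page_cgroup(pc), check PageCgroupUsed(pc), update ... */
}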
diff --git a/mm/rmap.c b/mm/rmap.c
index c9ccc1a72dc3..836c6c63e1f2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -703,8 +703,10 @@ void page_add_new_anon_rmap(struct page *page,
*/
void page_add_file_rmap(struct page *page)
{
- if (atomic_inc_and_test(&page->_mapcount))
+ if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
+ mem_cgroup_update_mapped_file_stat(page, 1);
+ }
}
#ifdef CONFIG_DEBUG_VM
@@ -753,6 +755,7 @@ void page_remove_rmap(struct page *page)
mem_cgroup_uncharge_page(page);
__dec_zone_page_state(page,
PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+ mem_cgroup_update_mapped_file_stat(page, -1);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
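Both hooks ride the existing 0<->1 mapcount transitions, so "mapped_file" counts pages, not ptes: _mapcount starts at -1, atomic_inc_and_test() fires only for the first mapping, and the atomic_add_negative(-1, ...) branch around page_remove_rmap()'s body fires only for the last unmap. A stand-alone model of that transition logic:

#include <stdio.h>

int main(void)
{
	int mapcount = -1;	/* page->_mapcount: no mappings yet */
	int pages_mapped = 0;	/* the statistic being maintained */

	if (++mapcount == 0)	/* atomic_inc_and_test() analogue */
		pages_mapped++;	/* first mapping: count the page once */
	++mapcount;		/* second mapping: no stat change */

	--mapcount;		/* one mapping still remains */
	if (--mapcount < 0)	/* atomic_add_negative(-1, ...) analogue */
		pages_mapped--;	/* last mapping gone: uncount */

	printf("pages_mapped = %d\n", pages_mapped);	/* prints 0 */
	return 0;
}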
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6e..e74a16e4ced6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,12 +305,6 @@ struct kmem_list3 {
};
/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
-/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- /*
- * Interrupts are enabled now so all GFP allocations are safe.
- */
- slab_gfp_mask = __GFP_BITS_MASK;
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
diff --git a/mm/slub.c b/mm/slub.c
index 4c6449310a0e..ce62b770e2fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -179,12 +179,6 @@ static enum {
SYSFS /* Sysfs up */
} slab_state = DOWN;
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
unsigned long flags;
unsigned int objsize;
- gfpflags &= slab_gfp_mask;
+ gfpflags &= gfp_allowed_mask;
lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
void __init kmem_cache_init_late(void)
{
- /*
- * Interrupts are enabled now so all GFP allocations are safe.
- */
- slab_gfp_mask = __GFP_BITS_MASK;
}
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 28faa01cf578..d1ade1a48ee7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -583,8 +583,9 @@ static int swap_entry_free(struct swap_info_struct *p,
swap_list.next = p - swap_info;
nr_swap_pages++;
p->inuse_pages--;
- mem_cgroup_uncharge_swap(ent);
}
+ if (!swap_count(count))
+ mem_cgroup_uncharge_swap(ent);
return count;
}
@@ -609,12 +610,19 @@ void swap_free(swp_entry_t entry)
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
+ int ret;
- if (page)
- mem_cgroup_uncharge_swapcache(page, entry);
p = swap_info_get(entry);
if (p) {
- swap_entry_free(p, entry, SWAP_CACHE);
+ ret = swap_entry_free(p, entry, SWAP_CACHE);
+ if (page) {
+ bool swapout;
+ if (ret)
+ swapout = true; /* the end of swap out */
+ else
+ swapout = false; /* no more swap users! */
+ mem_cgroup_uncharge_swapcache(page, entry, swapout);
+ }
spin_unlock(&swap_lock);
}
return;
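The reordering matters: swap_entry_free() now runs first, and its return value (the remaining use count) tells the memcg side which case it is handling. A condensed restatement of the new contract (relies on swapfile.c statics, so a sketch rather than stand-alone code):

static void demo_swapcache_free(swp_entry_t entry, struct page *page)
{
	struct swap_info_struct *p = swap_info_get(entry);

	if (p) {
		int remaining = swap_entry_free(p, entry, SWAP_CACHE);

		if (page)
			/* remaining != 0: a real swap-out completed;
			 * remaining == 0: the swap entry was unused. */
			mem_cgroup_uncharge_swapcache(page, entry,
						      remaining != 0);
		spin_unlock(&swap_lock);
	}
}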
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4139aa52b941..e8fa2d9eb212 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -837,7 +837,6 @@ int __isolate_lru_page(struct page *page, int mode, int file)
*/
ClearPageLRU(page);
ret = 0;
- mem_cgroup_del_lru(page);
}
return ret;
@@ -885,12 +884,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
switch (__isolate_lru_page(page, mode, file)) {
case 0:
list_move(&page->lru, dst);
+ mem_cgroup_del_lru(page);
nr_taken++;
break;
case -EBUSY:
/* else it is being freed elsewhere */
list_move(&page->lru, src);
+ mem_cgroup_rotate_lru_list(page, page_lru(page));
continue;
default:
@@ -931,6 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
+ mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}
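Net effect of the vmscan side: __isolate_lru_page() no longer touches memcg state, and each caller mirrors its own LRU movement. As a sketch, with a hypothetical helper:

#include <linux/swap.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

static void demo_take_or_rotate(struct page *page, struct list_head *dst,
				int mode, int file)
{
	switch (__isolate_lru_page(page, mode, file)) {
	case 0:
		list_move(&page->lru, dst);
		mem_cgroup_del_lru(page);	/* mirror the removal */
		break;
	case -EBUSY:
		/* page stays on the global LRU; rotate the memcg copy */
		mem_cgroup_rotate_lru_list(page, page_lru(page));
		break;
	default:
		break;
	}
}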