From 6d76dcf40405144a448040a350fd214ddc243d5e Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 31 Jul 2012 16:42:18 -0700
Subject: hugetlb/cgroup: add charge/uncharge routines for hugetlb cgroup

Add the charge and uncharge routines for hugetlb cgroup.  We do cgroup
charging in page alloc and uncharge in compound page destructor.
Assigning page's hugetlb cgroup is protected by hugetlb_lock.

[liwp@linux.vnet.ibm.com: add huge_page_order check to avoid incorrect uncharge]
Signed-off-by: Aneesh Kumar K.V
Cc: David Rientjes
Acked-by: KAMEZAWA Hiroyuki
Cc: Hillf Danton
Cc: Michal Hocko
Cc: KOSAKI Motohiro
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Wanpeng Li
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb_cgroup.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 0d1a66e9039b..63e04cfa437d 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -111,6 +111,86 @@ static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
 	return -EBUSY;
 }
 
+int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+				 struct hugetlb_cgroup **ptr)
+{
+	int ret = 0;
+	struct res_counter *fail_res;
+	struct hugetlb_cgroup *h_cg = NULL;
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled())
+		goto done;
+	/*
+	 * We don't charge any cgroup if the compound page have less
+	 * than 3 pages.
+	 */
+	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+		goto done;
+again:
+	rcu_read_lock();
+	h_cg = hugetlb_cgroup_from_task(current);
+	if (!css_tryget(&h_cg->css)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
+	css_put(&h_cg->css);
+done:
+	*ptr = h_cg;
+	return ret;
+}
+
+void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+				  struct hugetlb_cgroup *h_cg,
+				  struct page *page)
+{
+	if (hugetlb_cgroup_disabled() || !h_cg)
+		return;
+
+	spin_lock(&hugetlb_lock);
+	set_hugetlb_cgroup(page, h_cg);
+	spin_unlock(&hugetlb_lock);
+	return;
+}
+
+/*
+ * Should be called with hugetlb_lock held
+ */
+void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+				  struct page *page)
+{
+	struct hugetlb_cgroup *h_cg;
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled())
+		return;
+	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
+	h_cg = hugetlb_cgroup_from_page(page);
+	if (unlikely(!h_cg))
+		return;
+	set_hugetlb_cgroup(page, NULL);
+	res_counter_uncharge(&h_cg->hugepage[idx], csize);
+	return;
+}
+
+void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+				    struct hugetlb_cgroup *h_cg)
+{
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled() || !h_cg)
+		return;
+
+	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+		return;
+
+	res_counter_uncharge(&h_cg->hugepage[idx], csize);
+	return;
+}
+
 struct cgroup_subsys hugetlb_subsys = {
 	.name = "hugetlb",
 	.create     = hugetlb_cgroup_create,
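
Usage note (not part of the patch): the routines above form a two-phase
protocol on the allocation side -- charge the current task's cgroup first,
then, once a page has actually been obtained, bind the charge to that page
with hugetlb_cgroup_commit_charge() (which takes hugetlb_lock itself).  A
minimal caller sketch follows; the function name alloc_huge_page_sketch()
is hypothetical, the error handling is simplified, and it assumes the
hstate_index(), pages_per_huge_page(), and dequeue_huge_page_node() helpers
from mm/hugetlb.c of the same era.

#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct page *alloc_huge_page_sketch(struct hstate *h, int nid)
{
	struct hugetlb_cgroup *h_cg;
	struct page *page;
	int idx = hstate_index(h);			/* assumed helper */
	unsigned long nr_pages = pages_per_huge_page(h);
	int ret;

	/* Phase 1: charge the current task's cgroup up front. */
	ret = hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg);
	if (ret)
		return ERR_PTR(ret);

	page = dequeue_huge_page_node(h, nid);		/* assumed allocator */
	if (!page) {
		/* Allocation failed: refund the pre-charge. */
		hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
		return ERR_PTR(-ENOMEM);
	}

	/* Phase 2: bind the charge to the page (takes hugetlb_lock). */
	hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
	return page;
}

Pre-charging before the allocation keeps the failure path simple: if no
page is available, nothing has been assigned to a page yet, so
hugetlb_cgroup_uncharge_cgroup() alone restores the res_counter.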
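The uncharge side runs from the compound page destructor with hugetlb_lock
already held, matching the VM_BUG_ON in hugetlb_cgroup_uncharge_page().  A
minimal sketch of that contract, with the hypothetical name
free_huge_page_sketch() and the actual freeing elided:

static void free_huge_page_sketch(struct hstate *h, struct page *page)
{
	int idx = hstate_index(h);			/* assumed helper */

	spin_lock(&hugetlb_lock);
	/* Clears the page's cgroup pointer and refunds the res_counter. */
	hugetlb_cgroup_uncharge_page(idx, pages_per_huge_page(h), page);
	/* ... return the page to the hstate free lists here ... */
	spin_unlock(&hugetlb_lock);
}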