-rw-r--r--	fs/hugetlbfs/inode.c	12
-rw-r--r--	include/linux/hugetlb.h	68
-rw-r--r--	mm/hugetlb.c	48
3 files changed, 96 insertions(+), 32 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b7a72f577aab..907f6405e805 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -973,15 +973,9 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
- /*
- * page_private is subpool pointer in hugetlb pages. Transfer to
- * new page. PagePrivate is not associated with page_private for
- * hugetlb pages and can not be set here as only page_huge_active
- * pages can be migrated.
- */
- if (page_private(page)) {
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
+ if (hugetlb_page_subpool(page)) {
+ hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
+ hugetlb_set_page_subpool(page, NULL);
}
if (mode != MIGRATE_SYNC_NO_COPY)
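
The hunk above replaces the open-coded page_private() manipulation with the typed
subpool accessors introduced in include/linux/hugetlb.h below. A minimal sketch of
the same transfer pattern, assuming both pages are already held for migration
(demo_transfer_subpool is a hypothetical name, not part of the patch):

static void demo_transfer_subpool(struct page *old, struct page *new)
{
	/* the subpool pointer now lives in the first tail page's private field */
	struct hugepage_subpool *spool = hugetlb_page_subpool(old);

	if (spool) {
		hugetlb_set_page_subpool(new, spool);
		hugetlb_set_page_subpool(old, NULL);
	}
}
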
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ac3cb239a7ec..249c65e1d8ca 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -472,6 +472,60 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+/*
+ * hugetlb page specific state flags. These flags are located in page.private
+ * of the hugetlb head page. The functions created by the macros below should
+ * be used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return test_bit(HPG_##flname, &(page->private)); }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { set_bit(HPG_##flname, &(page->private)); }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { clear_bit(HPG_##flname, &(page->private)); }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
@@ -531,6 +585,20 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+/*
+ * hugetlb page subpool pointer located in hpage[1].private
+ */
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return (struct hugepage_subpool *)(hpage+1)->private;
+}
+
+static inline void hugetlb_set_page_subpool(struct page *hpage,
+ struct hugepage_subpool *subpool)
+{
+ set_page_private(hpage+1, (unsigned long)subpool);
+}
+
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
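
The HPAGEFLAG() family mirrors the PAGEFLAG() pattern from
include/linux/page-flags.h: an enum of bit indices, one word of storage, and
generated test/set/clear accessors. Moving the subpool pointer to
hpage[1].private (the first tail page) frees the head page's private word to
hold these flag bits. A self-contained userspace sketch of the same technique
(hypothetical names; plain bit arithmetic where the kernel uses the atomic
set_bit()/clear_bit()/test_bit() helpers):

#include <stdio.h>

struct fake_page { unsigned long private; };

enum demo_page_flags { DPG_restore_reserve = 0, __NR_DEMOFLAGS };

#define DEMOFLAG(uname, flname)						\
static inline int Demo##uname(struct fake_page *p)			\
	{ return !!(p->private & (1UL << DPG_##flname)); }		\
static inline void SetDemo##uname(struct fake_page *p)			\
	{ p->private |= 1UL << DPG_##flname; }				\
static inline void ClearDemo##uname(struct fake_page *p)		\
	{ p->private &= ~(1UL << DPG_##flname); }

DEMOFLAG(RestoreReserve, restore_reserve)

int main(void)
{
	struct fake_page pg = { 0 };

	SetDemoRestoreReserve(&pg);
	printf("after set:   %d\n", DemoRestoreReserve(&pg));	/* 1 */
	ClearDemoRestoreReserve(&pg);
	printf("after clear: %d\n", DemoRestoreReserve(&pg));	/* 0 */
	return 0;
}
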
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8a1d1f85e21e..f5e85dabb7a3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1143,7 +1143,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
- SetPagePrivate(page);
+ SetHPageRestoreReserve(page);
h->resv_huge_pages--;
}
@@ -1418,20 +1418,19 @@ static void __free_huge_page(struct page *page)
*/
struct hstate *h = page_hstate(page);
int nid = page_to_nid(page);
- struct hugepage_subpool *spool =
- (struct hugepage_subpool *)page_private(page);
+ struct hugepage_subpool *spool = hugetlb_page_subpool(page);
bool restore_reserve;
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(page_mapcount(page), page);
- set_page_private(page, 0);
+ hugetlb_set_page_subpool(page, NULL);
page->mapping = NULL;
- restore_reserve = PagePrivate(page);
- ClearPagePrivate(page);
+ restore_reserve = HPageRestoreReserve(page);
+ ClearHPageRestoreReserve(page);
/*
- * If PagePrivate() was set on page, page allocation consumed a
+ * If HPageRestoreReserve was set on page, page allocation consumed a
* reservation. If the page was associated with a subpool, there
* would have been a page reserved in the subpool before allocation
* via hugepage_subpool_get_pages(). Since we are 'restoring' the
@@ -2263,24 +2262,24 @@ static long vma_add_reservation(struct hstate *h,
* This routine is called to restore a reservation on error paths. In the
* specific error paths, a huge page was allocated (via alloc_huge_page)
* and is about to be freed. If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set PagePrivate
- * in the newly allocated page. When the page is freed via free_huge_page,
- * the global reservation count will be incremented if PagePrivate is set.
- * However, free_huge_page can not adjust the reserve map. Adjust the
- * reserve map here to be consistent with global reserve count adjustments
- * to be made by free_huge_page.
+ * alloc_huge_page would have consumed the reservation and set
+ * HPageRestoreReserve in the newly allocated page. When the page is freed
+ * via free_huge_page, the global reservation count will be incremented if
+ * HPageRestoreReserve is set. However, free_huge_page can not adjust the
+ * reserve map. Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.
*/
static void restore_reserve_on_error(struct hstate *h,
struct vm_area_struct *vma, unsigned long address,
struct page *page)
{
- if (unlikely(PagePrivate(page))) {
+ if (unlikely(HPageRestoreReserve(page))) {
long rc = vma_needs_reservation(h, vma, address);
if (unlikely(rc < 0)) {
/*
* Rare out of memory condition in reserve map
- * manipulation. Clear PagePrivate so that
+ * manipulation. Clear HPageRestoreReserve so that
* global reserve count will not be incremented
* by free_huge_page. This will make it appear
* as though the reservation for this page was
@@ -2289,7 +2288,7 @@ static void restore_reserve_on_error(struct hstate *h,
* is better than inconsistent global huge page
* accounting of reserve counts.
*/
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
} else if (rc) {
rc = vma_add_reservation(h, vma, address);
if (unlikely(rc < 0))
@@ -2297,7 +2296,7 @@ static void restore_reserve_on_error(struct hstate *h,
* See above comment about rare out of
* memory condition.
*/
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
} else
vma_end_reservation(h, vma, address);
}
@@ -2378,7 +2377,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
if (!page)
goto out_uncharge_cgroup;
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
- SetPagePrivate(page);
+ SetHPageRestoreReserve(page);
h->resv_huge_pages--;
}
spin_lock(&hugetlb_lock);
@@ -2396,7 +2395,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
spin_unlock(&hugetlb_lock);
- set_page_private(page, (unsigned long)spool);
+ hugetlb_set_page_subpool(page, spool);
map_commit = vma_commit_reservation(h, vma, addr);
if (unlikely(map_chg > map_commit)) {
@@ -3170,6 +3169,9 @@ static int __init hugetlb_init(void)
{
int i;
+ BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
+ __NR_HPAGEFLAGS);
+
if (!hugepages_supported()) {
if (hugetlb_max_hstate || default_hstate_max_huge_pages)
pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
@@ -4207,7 +4209,7 @@ retry_avoidcopy:
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
- ClearPagePrivate(new_page);
+ ClearHPageRestoreReserve(new_page);
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
@@ -4274,7 +4276,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
if (err)
return err;
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
/*
* set page dirty so that it will not be removed from cache/file
@@ -4436,7 +4438,7 @@ retry:
goto backout;
if (anon_rmap) {
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
hugepage_add_new_anon_rmap(page, vma, haddr);
} else
page_dup_rmap(page, true);
@@ -4750,7 +4752,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
if (vm_shared) {
page_dup_rmap(page, true);
} else {
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
}
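
Taken together, the mm/hugetlb.c hunks preserve the flag's lifecycle:
alloc_huge_page() sets HPageRestoreReserve when a reservation is consumed, the
instantiation paths clear it once the page is safely mapped, and
free_huge_page() increments the global reserve count if the flag is still set
at free time. A rough sketch of the error-path protocol described in the
restore_reserve_on_error() comment (simplified, assumed call site;
map_into_page_tables is a hypothetical stand-in for the real mapping step):

	page = alloc_huge_page(vma, haddr, 0);
	if (IS_ERR(page))
		return VM_FAULT_OOM;

	if (map_into_page_tables(page) != 0) {
		/* fix up the reserve map so it stays consistent with the
		 * global reserve count adjustment free_huge_page() will
		 * make based on HPageRestoreReserve */
		restore_reserve_on_error(h, vma, haddr, page);
		put_page(page);		/* ends up in free_huge_page() */
		return VM_FAULT_SIGBUS;
	}
	ClearHPageRestoreReserve(page);
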