 include/linux/hugetlb.h |  6 ------
 include/linux/mm.h      | 32 +++++++++++++++++++++++++++++++-
 mm/internal.h           |  3 ++-
 mm/swap.c               | 33 +++++++++++++++++++++++++++------
 4 files changed, 60 insertions(+), 14 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 251233c1494d..d01cc972a1d9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,7 +31,6 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page_head);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -104,11 +103,6 @@ static inline int PageHuge(struct page *page)
 	return 0;
 }
 
-static inline int PageHeadHuge(struct page *page_head)
-{
-	return 0;
-}
-
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
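
Only the PageHeadHuge() declaration moves here: the !CONFIG_HUGETLB_PAGE stub reappears in mm.h below, and the real implementation stays in mm/hugetlb.c. For reference, a sketch of how that implementation identifies a hugetlbfs head page in kernels of this era; the destructor-based check is an assumption about the surrounding tree, not part of this patch:

/* Sketch of the mm/hugetlb.c side (not part of this diff): a head
 * page belongs to hugetlbfs iff its compound destructor is
 * free_huge_page(). That property holds until the page is released
 * to the buddy allocator, which is what makes skipping the tail
 * refcounting safe for hugetlbfs. */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;
	return get_compound_page_dtor(page_head) == free_huge_page;
}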
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9fac6dd69b11..f95c71b7c1fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -414,15 +414,45 @@ static inline int page_count(struct page *page)
 	return atomic_read(&compound_head(page)->_count);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+extern int PageHeadHuge(struct page *page_head);
+#else /* CONFIG_HUGETLB_PAGE */
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline bool __compound_tail_refcounted(struct page *page)
+{
+	return !PageSlab(page) && !PageHeadHuge(page);
+}
+
+/*
+ * This takes a head page as parameter and tells if the
+ * tail page reference counting can be skipped.
+ *
+ * For this to be safe, PageSlab and PageHeadHuge must remain true on
+ * any given page where they return true here, until all tail pins
+ * have been released.
+ */
+static inline bool compound_tail_refcounted(struct page *page)
+{
+	VM_BUG_ON(!PageHead(page));
+	return __compound_tail_refcounted(page);
+}
+
 static inline void get_huge_page_tail(struct page *page)
 {
 	/*
 	 * __split_huge_page_refcount() cannot run
 	 * from under us.
+	 * In turn, there is no need for compound_trans_head here.
 	 */
 	VM_BUG_ON(page_mapcount(page) < 0);
 	VM_BUG_ON(atomic_read(&page->_count) != 0);
-	atomic_inc(&page->_mapcount);
+	if (compound_tail_refcounted(compound_head(page)))
+		atomic_inc(&page->_mapcount);
 }
 
 extern bool __get_page_tail(struct page *page);
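
The new helpers are easiest to see end to end outside the kernel. Below is a minimal userspace model of the acquire side; struct page, its fields, and the helper names are simplified stand-ins for illustration, not kernel definitions. A tail pin always elevates the head's _count, but touches the tail's _mapcount only when __split_huge_page_refcount() could actually split the page:

/* Userspace model of the tail-pin accounting introduced above. */
#include <stdbool.h>
#include <stdio.h>

struct page {
	bool slab;       /* models PageSlab(head)     */
	bool head_huge;  /* models PageHeadHuge(head) */
	int _count;      /* models head->_count       */
	int _mapcount;   /* models tail->_mapcount    */
};

/* Slab and hugetlbfs compound pages can never be split into
 * individual tail pages, so per-tail refcounting is unnecessary. */
static bool compound_tail_refcounted(const struct page *head)
{
	return !head->slab && !head->head_huge;
}

/* Models get_huge_page_tail(): the head is always pinned, the
 * tail's _mapcount only when a split is possible. */
static void pin_tail(struct page *head, struct page *tail)
{
	head->_count++;
	if (compound_tail_refcounted(head))
		tail->_mapcount++;
}

int main(void)
{
	struct page thp_head  = { ._count = 1, ._mapcount = -1 };
	struct page thp_tail  = { ._mapcount = -1 };
	struct page huge_head = { .head_huge = true, ._count = 1, ._mapcount = -1 };
	struct page huge_tail = { ._mapcount = -1 };

	pin_tail(&thp_head, &thp_tail);
	pin_tail(&huge_head, &huge_tail);

	printf("THP:       head _count=%d, tail _mapcount=%d\n",
	       thp_head._count, thp_tail._mapcount);
	printf("hugetlbfs: head _count=%d, tail _mapcount=%d\n",
	       huge_head._count, huge_tail._mapcount);
	return 0;
}

Any C99 compiler runs this; it prints _mapcount=0 for the THP tail but _mapcount=-1 (the rest value) for the hugetlbfs tail, which is exactly the invariant the mm/swap.c hunk below starts asserting.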
diff --git a/mm/internal.h b/mm/internal.h
index 684f7aa9692a..a85a3ab1f7ef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -51,7 +51,8 @@ static inline void __get_page_tail_foll(struct page *page,
 	VM_BUG_ON(page_mapcount(page) < 0);
 	if (get_page_head)
 		atomic_inc(&page->first_page->_count);
-	atomic_inc(&page->_mapcount);
+	if (compound_tail_refcounted(page->first_page))
+		atomic_inc(&page->_mapcount);
 }
 
 /*
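
__get_page_tail_foll() differs from get_huge_page_tail() only in that the head reference is optional: get_page_head is false when the caller has already pinned the head, as __get_page_tail() does via get_page_unless_zero(). A sketch of the updated flow, reusing the modeled struct page and compound_tail_refcounted() from the userspace sketch above (simplified stand-ins, not kernel code):

/* Models the updated __get_page_tail_foll(); head stands in for
 * page->first_page. */
static void pin_tail_foll(struct page *head, struct page *tail,
			  bool get_page_head)
{
	if (get_page_head)
		head->_count++;             /* atomic_inc(&page->first_page->_count) */
	if (compound_tail_refcounted(head))  /* check added by this patch */
		tail->_mapcount++;          /* atomic_inc(&page->_mapcount) */
}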
diff --git a/mm/swap.c b/mm/swap.c
index e2757fbb04ea..bba4aa5bf686 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -88,8 +88,9 @@ static void put_compound_page(struct page *page)
 
 			/*
 			 * THP can not break up slab pages so avoid taking
-			 * compound_lock(). Slab performs non-atomic bit ops
-			 * on page->flags for better performance. In
+			 * compound_lock() and skip the tail page refcounting
+			 * (in _mapcount) too. Slab performs non-atomic bit
+			 * ops on page->flags for better performance. In
 			 * particular slab_unlock() in slub used to be a hot
 			 * path. It is still hot on arches that do not support
 			 * this_cpu_cmpxchg_double().
@@ -102,7 +103,7 @@ static void put_compound_page(struct page *page)
 			 * PageTail clear after smp_rmb() and we'll treat it
 			 * as a single page.
 			 */
-			if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+			if (!__compound_tail_refcounted(page_head)) {
 				/*
 				 * If "page" is a THP tail, we must read the tail page
 				 * flags after the head page flags. The
@@ -117,10 +118,30 @@ static void put_compound_page(struct page *page)
 				 * cannot race here.
 				 */
 				VM_BUG_ON(!PageHead(page_head));
-				VM_BUG_ON(page_mapcount(page) <= 0);
-				atomic_dec(&page->_mapcount);
-				if (put_page_testzero(page_head))
+				VM_BUG_ON(page_mapcount(page) != 0);
+				if (put_page_testzero(page_head)) {
+					/*
+					 * If this is the tail of a
+					 * slab compound page, the
+					 * tail pin must not be the
+					 * last reference held on the
+					 * page, because PG_slab
+					 * cannot be cleared before
+					 * all tail pins (which skip
+					 * the _mapcount tail
+					 * refcounting) have been
+					 * released. For hugetlbfs the
+					 * tail pin may be the last
+					 * reference on the page
+					 * instead, because
+					 * PageHeadHuge will not go
+					 * away until the compound
+					 * page enters the buddy
+					 * allocator.
+					 */
+					VM_BUG_ON(PageSlab(page_head));
 					__put_compound_page(page_head);
+				}
 				return;
 			} else
 				/*
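
The release side above enforces the invariant the acquire-side models establish. One last self-contained userspace sketch (simplified stand-ins throughout, not kernel code) of the !__compound_tail_refcounted() branch: the tail pin never raised _mapcount, so page_mapcount(tail) must still be zero, and only hugetlbfs may legitimately drop the last head reference here:

/* Userspace model of the release path changed above. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct page { bool slab, head_huge; int _count, _mapcount; };

static bool compound_tail_refcounted(const struct page *head)
{
	return !head->slab && !head->head_huge;
}

static void free_compound(struct page *head)
{
	printf("compound page freed (hugetlbfs path)\n");
}

/* Models the !__compound_tail_refcounted() branch of
 * put_compound_page(): there is no tail _mapcount to drop, and
 * the head may reach zero here only for hugetlbfs, never slab. */
static void unpin_tail(struct page *head, struct page *tail)
{
	assert(!compound_tail_refcounted(head));
	/* VM_BUG_ON(page_mapcount(page) != 0): the tail pin never
	 * touched _mapcount, so it still holds its -1 rest value
	 * (page_mapcount() == _mapcount + 1 == 0). */
	assert(tail->_mapcount == -1);
	if (--head->_count == 0) {
		/* VM_BUG_ON(PageSlab(page_head)): PG_slab must outlive
		 * every tail pin, so a slab head cannot hit zero here. */
		assert(!head->slab);
		free_compound(head);
	}
}

int main(void)
{
	/* The tail pin is the last reference: legal for hugetlbfs. */
	struct page huge_head = { .head_huge = true, ._count = 1, ._mapcount = -1 };
	struct page huge_tail = { ._mapcount = -1 };

	unpin_tail(&huge_head, &huge_tail);
	return 0;
}

The VM_BUG_ON(PageSlab(page_head)) encodes the asymmetry spelled out in the added comment: PG_slab must remain set until every tail pin is released, while PageHeadHuge() stays true all the way until the compound page re-enters the buddy allocator.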