-rw-r--r--  include/linux/hugetlb.h    |  2 --
-rw-r--r--  include/linux/page-flags.h |  7 +++++++
-rw-r--r--  mm/hugetlb.c               | 42 +++++-------------------------------------
-rw-r--r--  mm/memory_hotplug.c        |  2 +-
4 files changed, 13 insertions(+), 40 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5713c49a4a5c..205026175c42 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -84,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
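An aside on the hunk above: the removed #define is_hugepage_active(x) false is a macro stub, while its replacement in page-flags.h below is a static inline. The inline form keeps argument type checking even when the feature is compiled out, at no runtime cost. A minimal standalone illustration of the difference (all names hypothetical, not kernel code):

#include <stdbool.h>

struct foo { int id; };

/* Macro stub: the argument is neither evaluated nor type-checked. */
#define foo_active_macro(x) false

/* Inline stub: callers keep pointer type checking, and the compiler
 * still folds the call to a constant. */
static inline bool foo_active(struct foo *f)
{
	(void)f;	/* parameter exists purely for type checking */
	return false;
}

int main(void)
{
	struct foo f = { .id = 1 };

	return foo_active(&f);	/* always 0, but &f is still type-checked */
}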
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 84d10b65cec6..f34e040b34e9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -470,11 +470,18 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+ return false;
+}
#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
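Only the !CONFIG_HUGETLB_PAGE stub is visible in this hunk; the real page_huge_active() is defined in mm/hugetlb.c. In this series the "active" state is assumed to be kept as a flag on the first tail page of the compound page rather than on the head page. A standalone toy model of that layout, with illustrative rather than kernel-accurate types:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a compound page: per-hugepage state that does not fit
 * on the head page is parked on the first tail page, page[1]. These
 * names sketch the idea and are not the kernel's real layout. */
struct page {
	bool head;
	bool priv;	/* models the PagePrivate bit */
};

/* Assumed shape of the real test: head page plus a flag on page[1]. */
static bool page_huge_active(struct page *page)
{
	return page[0].head && page[1].priv;
}

static void set_page_huge_active(struct page *page)
{
	page[1].priv = true;	/* e.g. when linked to the activelist */
}

int main(void)
{
	struct page hugepage[512] = { [0] = { .head = true, .priv = false } };

	printf("active before: %d\n", page_huge_active(hugepage));
	set_page_huge_active(hugepage);
	printf("active after:  %d\n", page_huge_active(hugepage));
	return 0;
}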
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 05407831016b..271e4432734c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3896,20 +3896,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
#ifdef CONFIG_MEMORY_FAILURE
-/* Should be called in hugetlb_lock */
-static int is_hugepage_on_freelist(struct page *hpage)
-{
- struct page *page;
- struct page *tmp;
- struct hstate *h = page_hstate(hpage);
- int nid = page_to_nid(hpage);
-
- list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
- if (page == hpage)
- return 1;
- return 0;
-}
-
/*
 * This function is called from memory failure code.
 * Assume the caller holds page lock of the head page.
 */
@@ -3921,7 +3907,11 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
int ret = -EBUSY;
spin_lock(&hugetlb_lock);
- if (is_hugepage_on_freelist(hpage)) {
+ /*
+ * Just checking !page_huge_active is not enough, because that could be
+ * an isolated/hwpoisoned hugepage (which has a >0 refcount).
+ */
+ if (!page_huge_active(hpage) && !page_count(hpage)) {
/*
* Hwpoisoned hugepage isn't linked to activelist or freelist,
* but dangling hpage->lru can trigger list-debug warnings
@@ -3965,25 +3955,3 @@ void putback_active_hugepage(struct page *page)
spin_unlock(&hugetlb_lock);
put_page(page);
}
-
-bool is_hugepage_active(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- /*
- * This function can be called for a tail page because the caller,
- * scan_movable_pages, scans through a given pfn-range which typically
- * covers one memory block. In systems using gigantic hugepage (1GB
- * for x86_64,) a hugepage is larger than a memory block, and we don't
- * support migrating such large hugepages for now, so return false
- * when called for tail pages.
- */
- if (PageTail(page))
- return false;
- /*
- * Refcount of a hwpoisoned hugepages is 1, but they are not active,
- * so we should return false for them.
- */
- if (unlikely(PageHWPoison(page)))
- return false;
- return page_count(page) > 0;
-}
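The net effect in dequeue_hwpoisoned_huge_page() is an O(n)-to-O(1) change: instead of walking hugepage_freelists[nid] under hugetlb_lock, the new test infers "free" from two cheap predicates. A page that is neither active nor referenced must be sitting on a freelist; isolated and hwpoisoned pages hold a reference, so the refcount check screens them out. A standalone model of the two checks (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a hugepage's bookkeeping state. */
struct hpage {
	bool active;		/* models page_huge_active() */
	int  refcount;		/* models page_count() */
	bool on_freelist;	/* ground truth, for the old-style walk */
};

/* Old style: O(n) scan of the per-node freelist. */
static bool on_freelist_walk(struct hpage *pages, int n, struct hpage *hp)
{
	for (int i = 0; i < n; i++)
		if (&pages[i] == hp && pages[i].on_freelist)
			return true;
	return false;
}

/* New style: O(1) inference. Not active and refcount 0 implies free;
 * isolated or hwpoisoned pages hold a reference and fail the test. */
static bool is_free_check(struct hpage *hp)
{
	return !hp->active && hp->refcount == 0;
}

int main(void)
{
	struct hpage pool[3] = {
		{ .active = true,  .refcount = 1, .on_freelist = false }, /* in use */
		{ .active = false, .refcount = 0, .on_freelist = true  }, /* free */
		{ .active = false, .refcount = 1, .on_freelist = false }, /* isolated/hwpoisoned */
	};

	for (int i = 0; i < 3; i++)
		printf("page %d: walk=%d check=%d\n", i,
		       on_freelist_walk(pool, 3, &pool[i]),
		       is_free_check(&pool[i]));
	return 0;
}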
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e2e8014fb755..457bde530cbe 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1373,7 +1373,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
if (PageLRU(page))
return pfn;
if (PageHuge(page)) {
- if (is_hugepage_active(page))
+ if (page_huge_active(page))
return pfn;
else
pfn = round_up(pfn + 1,
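The trailing context above is cut off by the hunk window; the round_up() call presumably continues with the compound page order and advances pfn past the whole non-movable gigantic hugepage in one step, so the scan does not revisit its tail pages. The arithmetic, modeled standalone (round_up here is the usual power-of-two formulation; values illustrative):

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a. */
#define round_up(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long pfn = 0x12345;	/* hypothetical pfn inside a gigantic page */
	unsigned int order = 18;	/* 1GB hugepage with 4KB base pages: 2^18 pages */

	/* Jump to the last pfn of the enclosing 2^order-page block; the
	 * caller's loop increment then lands on the first pfn after it. */
	pfn = round_up(pfn + 1, 1UL << order) - 1;
	printf("resume scanning at pfn %#lx\n", pfn + 1);	/* prints 0x40000 */
	return 0;
}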