 arch/arm/include/asm/tlb.h  | 2 ++
 arch/ia64/include/asm/tlb.h | 3 +++
 arch/s390/include/asm/tlb.h | 2 ++
 arch/sh/include/asm/tlb.h   | 3 +++
 arch/um/include/asm/tlb.h   | 3 +++
 include/asm-generic/tlb.h   | 6 ++++++
 mm/hugetlb.c                | 2 +-
 7 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 1e25cd80589e..82841ba1f51f 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -186,6 +186,8 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
tlb_add_flush(tlb, addr);
}
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 77e541cf0e5d..b3f369ab844d 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -283,6 +283,9 @@ do { \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 15711de10403..094440b59f9e 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -162,5 +162,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
#endif /* _S390_TLB_H */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 025cdb1032f6..e7d15e8c75c1 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -65,6 +65,9 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
tlb->end = address + PAGE_SIZE;
}
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 821ff0acfe17..a4427029c3c8 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -141,6 +141,9 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index dba727becd5f..38c2b708df6e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -220,6 +220,12 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, huge_page_size(h)); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
/**
* tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
* This is a nop so far, because only x86 needs it.
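
The asm-generic definition above carries the actual fix: before recording the PTE, it widens the pending flush window by huge_page_size(h) instead of the single PAGE_SIZE that plain tlb_remove_tlb_entry accounts for. The following user-space sketch mirrors that range-tracking idea; struct gather_range and adjust_range are hypothetical stand-ins for mmu_gather's start/end bookkeeping and __tlb_adjust_range, not the kernel's own code.

/*
 * Hypothetical stand-in for mmu_gather range tracking. adjust_range()
 * models what __tlb_adjust_range() does with its range_size argument;
 * the field and function names here are assumptions.
 */
struct gather_range {
	unsigned long start;	/* lowest address with a pending flush */
	unsigned long end;	/* one past the highest such address */
};

static void adjust_range(struct gather_range *r,
			 unsigned long address, unsigned long range_size)
{
	if (address < r->start)
		r->start = address;
	if (address + range_size > r->end)
		r->end = address + range_size;
}

Passing huge_page_size(h) as range_size means the eventual range flush covers the whole huge mapping rather than just its first base page, which is what the hugetlb unmap path below needs. The five arch-private headers patched earlier do not use this generic range tracking, so for them the new macro simply falls back to the existing per-entry hook.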
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c12296f62e8d..8e519da7242d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3336,7 +3336,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
pte = huge_ptep_get_and_clear(mm, address, ptep);
- tlb_remove_tlb_entry(tlb, ptep, address);
+ tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
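
The call-site change in __unmap_hugepage_range is the one-line payoff: the cleared huge PTE is now recorded with its real extent. To make the difference concrete, here is a small self-contained demo reusing the adjust_range sketch from above, assuming 4 KiB base pages and 2 MiB huge pages (illustrative values, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE  4096UL			/* assumed base page size */
#define HPAGE_SIZE (2UL * 1024 * 1024)		/* assumed huge page size */

struct gather_range { unsigned long start, end; };

/* Same hypothetical helper as in the earlier sketch. */
static void adjust_range(struct gather_range *r,
			 unsigned long address, unsigned long range_size)
{
	if (address < r->start)
		r->start = address;
	if (address + range_size > r->end)
		r->end = address + range_size;
}

int main(void)
{
	struct gather_range old_way = { ~0UL, 0 };	/* tlb_remove_tlb_entry */
	struct gather_range new_way = { ~0UL, 0 };	/* tlb_remove_huge_tlb_entry */
	unsigned long address = 0x200000;		/* one 2 MiB huge PTE */

	adjust_range(&old_way, address, PAGE_SIZE);
	adjust_range(&new_way, address, HPAGE_SIZE);

	printf("old flush window: %lu KiB\n", (old_way.end - old_way.start) >> 10);
	printf("new flush window: %lu KiB\n", (new_way.end - new_way.start) >> 10);
	return 0;
}

With the old per-page accounting, only the first 4 KiB of the huge page fell inside the flush window; the patched path covers the full 2 MiB, so translations for the rest of the huge page are invalidated as well.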