Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--	include/linux/ksm.h	101
1 file changed, 68 insertions(+), 33 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 899a314bc487..c982694c987b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -13,39 +13,70 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/sched.h>
-#include <linux/sched/coredump.h>
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, int advice, unsigned long *vm_flags);
-
-void ksm_add_vma(struct vm_area_struct *vma);
+		unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+			 vm_flags_t vm_flags);
 int ksm_enable_merge_any(struct mm_struct *mm);
 int ksm_disable_merge_any(struct mm_struct *mm);
 int ksm_disable(struct mm_struct *mm);
 
 int __ksm_enter(struct mm_struct *mm);
 void __ksm_exit(struct mm_struct *mm);
 
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
 {
-	int ret;
+	atomic_long_inc(&ksm_zero_pages);
+	atomic_long_inc(&mm->ksm_zero_pages);
+}
 
-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
-		ret = __ksm_enter(mm);
-		if (ret)
-			return ret;
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+	if (is_ksm_zero_pte(pte)) {
+		atomic_long_dec(&ksm_zero_pages);
+		atomic_long_dec(&mm->ksm_zero_pages);
 	}
+}
 
-	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
-		set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->ksm_zero_pages);
+}
+
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	/* Adding mm to ksm is best effort on fork. */
+	if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
+		long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+		mm->ksm_merging_pages = 0;
+		mm->ksm_rmap_items = 0;
+		atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+		__ksm_enter(mm);
+	}
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
+{
+	if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
+		return __ksm_enter(mm);
 
 	return 0;
 }
 
 static inline void ksm_exit(struct mm_struct *mm)
 {
-	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+	if (mm_flags_test(MMF_VM_MERGEABLE, mm))
 		__ksm_exit(mm);
 }
 
@@ -60,25 +91,22 @@ static inline void ksm_exit(struct mm_struct *mm)
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-
-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
+		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */
+bool ksm_process_mergeable(struct mm_struct *mm);
 
 #else /* !CONFIG_KSM */
 
-static inline void ksm_add_vma(struct vm_area_struct *vma)
+static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
+		const struct file *file, vm_flags_t vm_flags)
 {
+	return vm_flags;
 }
 
 static inline int ksm_disable(struct mm_struct *mm)
@@ -86,7 +114,11 @@ static inline int ksm_disable(struct mm_struct *mm)
 	return 0;
 }
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
 {
 	return 0;
 }
@@ -95,24 +127,27 @@ static inline void ksm_exit(struct mm_struct *mm)
 {
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
-		struct list_head *to_kill, int force_early)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void collect_procs_ksm(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 }
-#endif
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-		unsigned long end, int advice, unsigned long *vm_flags)
+		unsigned long end, int advice, vm_flags_t *vm_flags)
 {
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
-		struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	return page;
+	return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,
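
A note on the dirty-bit trick introduced above: once a VMA has been unmerged, KSM can no longer use vma->vm_flags to recognize the zeropages it installed, so it marks its zeropage PTEs dirty, which an ordinary read fault never does for the shared zeropage. The following caller sketch is hypothetical and not part of this header (the function name zap_example_pte is invented for illustration); the in-tree callers sit on the page-table teardown paths, and the new CONFIG_KSM=n stub lets such calls compile away:

	/*
	 * Hypothetical sketch, not part of include/linux/ksm.h: how a PTE
	 * teardown path is expected to pair with KSM's zeropage accounting.
	 * KSM installs the shared zeropage with the dirty bit set (see
	 * ksm_map_zero_page()), while a plain read fault installs it clean,
	 * so is_ksm_zero_pte() can tell the two apart.
	 */
	static void zap_example_pte(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
	{
		pte_t pte = ptep_get(ptep);

		/* Drops the global and per-mm counters only for KSM zeropages. */
		ksm_might_unmap_zero_page(mm, pte);
		pte_clear(mm, addr, ptep);
	}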
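The ksm_fork() rework also changes its contract: enrolling the child mm is now best effort and the function returns void, while the new ksm_execve() still returns an error, since a process that opted into MMF_VM_MERGE_ANY must not silently lose KSM across exec. The counter handling in the new ksm_fork() body follows from the mm_struct copy: the child inherits the parent's ksm_zero_pages value, which matches the zeropage PTEs duplicated into it, so the global ksm_zero_pages is raised by that amount, while ksm_merging_pages and ksm_rmap_items describe rmap state that is not duplicated and are reset. A sketch of the resulting caller pattern, with invented function names (the real call sites are on the fork and execve paths, outside this header):

	/*
	 * Hypothetical sketch, not part of this header: the asymmetric error
	 * handling implied by the new signatures. A failed enrollment on fork
	 * is tolerated (the child simply is not added to KSM), whereas a
	 * failure on execve is reported so the caller can abort the exec.
	 */
	static int example_exec_mm(struct mm_struct *mm)
	{
		int err = ksm_execve(mm);	/* may fail, e.g. -ENOMEM */

		if (err)
			return err;
		return 0;
	}

	static void example_fork_mm(struct mm_struct *mm, struct mm_struct *oldmm)
	{
		ksm_fork(mm, oldmm);		/* void: best effort, never fails */
	}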
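Finally, ksm_vma_flags() replaces the old ksm_add_vma(): rather than retroactively flagging a VMA that already exists, the mmap path can fold KSM's verdict into the flags before the VMA is created, and the CONFIG_KSM=n stub simply returns vm_flags unchanged. A hypothetical caller sketch (example_mmap_flags is invented; the expectation that VM_MERGEABLE is added for eligible mappings under MMF_VM_MERGE_ANY is an assumption based on the semantics of the function it replaces):

	/*
	 * Hypothetical sketch, not part of this header: computing the final
	 * flags for a new mapping. With MMF_VM_MERGE_ANY set on the mm,
	 * ksm_vma_flags() is expected to add VM_MERGEABLE for eligible
	 * mappings; with CONFIG_KSM=n it is the identity function.
	 */
	static vm_flags_t example_mmap_flags(struct mm_struct *mm,
					     struct file *file, vm_flags_t vm_flags)
	{
		vm_flags = ksm_vma_flags(mm, file, vm_flags);
		/* ... combine with arch/filesystem adjustments, then create the VMA. */
		return vm_flags;
	}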
