Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c | 58
1 file changed, 24 insertions(+), 34 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b94fbb45d5c7..81991102f785 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -114,7 +114,7 @@ static long change_pte_range(struct mmu_gather *tlb,
* pages. See similar comment in change_huge_pmd.
*/
if (prot_numa) {
- struct page *page;
+ struct folio *folio;
int nid;
bool toptier;
@@ -122,13 +122,14 @@ static long change_pte_range(struct mmu_gather *tlb,
if (pte_protnone(oldpte))
continue;
- page = vm_normal_page(vma, addr, oldpte);
- if (!page || is_zone_device_page(page) || PageKsm(page))
+ folio = vm_normal_folio(vma, addr, oldpte);
+ if (!folio || folio_is_zone_device(folio) ||
+ folio_test_ksm(folio))
continue;
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
- page_count(page) != 1)
+ folio_ref_count(folio) != 1)
continue;
/*
@@ -136,14 +137,15 @@ static long change_pte_range(struct mmu_gather *tlb,
* it cannot move them all from MIGRATE_ASYNC
* context.
*/
- if (page_is_file_lru(page) && PageDirty(page))
+ if (folio_is_file_lru(folio) &&
+ folio_test_dirty(folio))
continue;
/*
* Don't mess with PTEs if page is already on the node
* a single-threaded process is running on.
*/
- nid = page_to_nid(page);
+ nid = folio_nid(folio);
if (target_node == nid)
continue;
toptier = node_is_toptier(nid);
@@ -157,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb,
continue;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!toptier)
- xchg_page_access_time(page,
+ folio_xchg_access_time(folio,
jiffies_to_msecs(jiffies));
}
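
For readability, this is roughly what the prot_numa path of change_pte_range() looks like with the hunks above applied. It is a reconstruction from the '+' and context lines only; diff context that is not shown here is marked with an elision comment, and the block comments are condensed from the ones visible in the hunks:

		if (prot_numa) {
			struct folio *folio;
			int nid;
			bool toptier;

			if (pte_protnone(oldpte))
				continue;

			folio = vm_normal_folio(vma, addr, oldpte);
			if (!folio || folio_is_zone_device(folio) ||
			    folio_test_ksm(folio))
				continue;

			/* Also skip shared copy-on-write pages */
			if (is_cow_mapping(vma->vm_flags) &&
			    folio_ref_count(folio) != 1)
				continue;

			/*
			 * Dirty file-backed folios cannot all be migrated
			 * from MIGRATE_ASYNC context.
			 */
			if (folio_is_file_lru(folio) &&
			    folio_test_dirty(folio))
				continue;

			/*
			 * Don't mess with PTEs if page is already on the node
			 * a single-threaded process is running on.
			 */
			nid = folio_nid(folio);
			if (target_node == nid)
				continue;
			toptier = node_is_toptier(nid);

			/* ... further toptier checks elided from this diff ... */

			if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
			    !toptier)
				folio_xchg_access_time(folio,
						       jiffies_to_msecs(jiffies));
		}

The conversion is mechanical: each struct page accessor is replaced by its folio equivalent (vm_normal_folio(), folio_test_ksm(), folio_ref_count(), folio_test_dirty(), folio_nid(), folio_xchg_access_time()), so the NUMA-hinting skip logic itself is unchanged.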
@@ -581,7 +583,6 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
long nrpages = (end - start) >> PAGE_SHIFT;
unsigned int mm_cp_flags = 0;
unsigned long charged = 0;
- pgoff_t pgoff;
int error;
if (newflags == oldflags) {
@@ -608,8 +609,11 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
/*
* If we make a private mapping writable we increase our commit;
* but (without finer accounting) cannot reduce our commit if we
- * make it unwritable again. hugetlb mapping were accounted for
- * even if read-only so there is no need to account for them here
+ * make it unwritable again except in the anonymous case where no
+ * anon_vma has yet been assigned.
+ *
+ * hugetlb mappings were accounted for even if read-only so there is
+ * no need to account for them here.
*/
if (newflags & VM_WRITE) {
/* Check space limits when area turns into data. */
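
The accounting rule in this comment is user-visible: making a private anonymous mapping writable charges it against the overcommit limit, and with this change the charge is dropped again when the mapping is made read-only, as long as the VMA never faulted in anonymous pages (no anon_vma assigned). A minimal userspace sketch that makes the effect observable via Committed_AS in /proc/meminfo (the helper name and the 64 MiB size are illustrative, and exact deltas depend on the running kernel):

#include <stdio.h>
#include <sys/mman.h>

/* Read Committed_AS from /proc/meminfo to observe the commit charge. */
static long committed_kb(void)
{
	char line[128];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Committed_AS: %ld", &kb) == 1)
			break;
	fclose(f);
	return kb;
}

int main(void)
{
	size_t len = 64 << 20;	/* 64 MiB, never touched */
	/* Read-only private anonymous mapping: nothing charged yet. */
	void *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	printf("before PROT_WRITE: %ld kB\n", committed_kb());
	/* Making it writable charges the commit (VM_ACCOUNT is set). */
	mprotect(p, len, PROT_READ | PROT_WRITE);
	printf("after  PROT_WRITE: %ld kB\n", committed_kb());
	/*
	 * Making it read-only again: with this patch the charge should be
	 * dropped, because the VMA is anonymous and has no anon_vma yet.
	 */
	mprotect(p, len, PROT_READ);
	printf("after  PROT_READ : %ld kB\n", committed_kb());
	munmap(p, len);
	return 0;
}

On a kernel with this patch the third value should drop back toward the first; without it, the charge taken by the PROT_WRITE step is kept even after the mapping is made read-only again.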
@@ -623,36 +627,19 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
return -ENOMEM;
newflags |= VM_ACCOUNT;
}
+ } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
+ !vma->anon_vma) {
+ newflags &= ~VM_ACCOUNT;
}
- /*
- * First try to merge with previous and/or next vma.
- */
- pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- *pprev = vma_merge(vmi, mm, *pprev, start, end, newflags,
- vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, anon_vma_name(vma));
- if (*pprev) {
- vma = *pprev;
- VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
- goto success;
+ vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
+ if (IS_ERR(vma)) {
+ error = PTR_ERR(vma);
+ goto fail;
}
*pprev = vma;
- if (start != vma->vm_start) {
- error = split_vma(vmi, vma, start, 1);
- if (error)
- goto fail;
- }
-
- if (end != vma->vm_end) {
- error = split_vma(vmi, vma, end, 0);
- if (error)
- goto fail;
- }
-
-success:
/*
* vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
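
After these hunks the explicit merge/split sequence is gone; reconstructed from the '+' and context lines above, the middle of mprotect_fixup() now reads approximately:

	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
	if (IS_ERR(vma)) {
		error = PTR_ERR(vma);
		goto fail;
	}

	*pprev = vma;

	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */

vma_modify_flags() takes over what the removed code did by hand: merge with the previous/next VMA where the new flags allow it, otherwise split at start and/or end, and return the VMA that now covers the range (or an ERR_PTR() on failure). That is also why the local pgoff variable and the success: label can be dropped.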
@@ -665,6 +652,9 @@ success:
change_protection(tlb, vma, start, end, mm_cp_flags);
+ if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
+ vm_unacct_memory(nrpages);
+
/*
* Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
* fault on access.