Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c        | 23
-rw-r--r--  arch/x86/mm/pat/memtype.c  | 21
-rw-r--r--  arch/x86/mm/tlb.c          |  2
3 files changed, 14 insertions, 32 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7b0d4ab894c8..a498ae1fbe66 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -260,7 +260,7 @@ static noinline int vmalloc_fault(unsigned long address)
 }
 NOKPROBE_SYMBOL(vmalloc_fault);
 
-static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
@@ -284,27 +284,6 @@ static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 	}
 }
 
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
-	__arch_sync_kernel_mappings(start, end);
-#ifdef CONFIG_KMSAN
-	/*
-	 * KMSAN maintains two additional metadata page mappings for the
-	 * [VMALLOC_START, VMALLOC_END) range. These mappings start at
-	 * KMSAN_VMALLOC_SHADOW_START and KMSAN_VMALLOC_ORIGIN_START and
-	 * have to be synced together with the vmalloc memory mapping.
-	 */
-	if (start >= VMALLOC_START && end < VMALLOC_END) {
-		__arch_sync_kernel_mappings(
-			start - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START,
-			end - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START);
-		__arch_sync_kernel_mappings(
-			start - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START,
-			end - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START);
-	}
-#endif
-}
-
 static bool low_pfn(unsigned long pfn)
 {
 	return pfn < max_low_pfn;
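For reference, the wrapper deleted above kept KMSAN's shadow and origin mappings in sync by reusing the faulting range's offset within [VMALLOC_START, VMALLOC_END). A minimal standalone sketch of that address arithmetic follows; the three layout constants are illustrative placeholders, not the kernel's actual values.

#include <stdio.h>

/* Illustrative placeholder addresses; the kernel derives the real
 * ones from its memory layout headers. */
#define VMALLOC_START              0xffffc90000000000UL
#define KMSAN_VMALLOC_SHADOW_START 0xffffec0000000000UL
#define KMSAN_VMALLOC_ORIGIN_START 0xfffff40000000000UL

/*
 * Mirror the removed wrapper's arithmetic: a vmalloc range maps to
 * shadow/origin ranges at the same offset within their own regions.
 */
static void kmsan_meta_ranges(unsigned long start, unsigned long end)
{
	printf("shadow: %#lx-%#lx\n",
	       start - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START,
	       end   - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START);
	printf("origin: %#lx-%#lx\n",
	       start - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START,
	       end   - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START);
}

int main(void)
{
	kmsan_meta_ranges(VMALLOC_START + 0x1000, VMALLOC_START + 0x5000);
	return 0;
}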
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index fb4b1b5e0dea..46a00aa858b6 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -159,10 +159,10 @@ static inline void set_page_memtype(struct page *pg,
 		break;
 	}
 
+	old_flags = READ_ONCE(pg->flags);
 	do {
-		old_flags = pg->flags;
 		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
-	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
+	} while (!try_cmpxchg(&pg->flags, &old_flags, new_flags));
 }
 #else
 static inline enum page_cache_mode get_page_memtype(struct page *pg)
@@ -387,8 +387,7 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
 	u8 mtrr_type, uniform;
 
 	mtrr_type = mtrr_type_lookup(start, end, &uniform);
-	if (mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_INVALID)
+	if (mtrr_type != MTRR_TYPE_WRBACK)
 		return _PAGE_CACHE_MODE_UC_MINUS;
 
 	return _PAGE_CACHE_MODE_WB;
@@ -1000,7 +999,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 
 	ret = reserve_pfn_range(paddr, size, prot, 0);
 	if (ret == 0 && vma)
-		vma->vm_flags |= VM_PAT;
+		vm_flags_set(vma, VM_PAT);
 
 	return ret;
 }
@@ -1046,7 +1045,7 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
  * can be for the entire vma (in which case pfn, size are zero).
  */
 void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
-		 unsigned long size)
+		 unsigned long size, bool mm_wr_locked)
 {
 	resource_size_t paddr;
 	unsigned long prot;
@@ -1065,8 +1064,12 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		size = vma->vm_end - vma->vm_start;
 	}
 	free_pfn_range(paddr, size);
-	if (vma)
-		vma->vm_flags &= ~VM_PAT;
+	if (vma) {
+		if (mm_wr_locked)
+			vm_flags_clear(vma, VM_PAT);
+		else
+			__vm_flags_mod(vma, 0, VM_PAT);
+	}
 }
 
 /*
@@ -1076,7 +1079,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
  */
 void untrack_pfn_moved(struct vm_area_struct *vma)
 {
-	vma->vm_flags &= ~VM_PAT;
+	vm_flags_clear(vma, VM_PAT);
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c1e31e9a85d7..92d73ccede70 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1205,7 +1205,7 @@ void __flush_tlb_all(void)
 	 */
 	VM_WARN_ON_ONCE(preemptible());
 
-	if (boot_cpu_has(X86_FEATURE_PGE)) {
+	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
 		__flush_tlb_global();
 	} else {
 		/*
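The set_page_memtype() hunk is the usual cmpxchg() to try_cmpxchg() conversion: try_cmpxchg() writes the current value back into old_flags when the compare fails, so the loop body no longer re-reads pg->flags by hand, and the initial read moves ahead of the loop. A standalone C11 analogue of the resulting loop, with made-up stand-ins for _PGMT_CLEAR_MASK and the memtype bits:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for _PGMT_CLEAR_MASK and a memtype value; illustrative only. */
#define PGMT_CLEAR_MASK  (~0x7UL)
#define PGMT_WC          0x2UL

static _Atomic unsigned long page_flags;   /* plays the role of pg->flags */

static void set_memtype_bits(unsigned long memtype_flags)
{
	unsigned long old = atomic_load_explicit(&page_flags,
						 memory_order_relaxed);
	unsigned long new;

	/*
	 * Like the kernel's try_cmpxchg(), compare_exchange writes the
	 * current value back into 'old' on failure, so the loop never
	 * re-reads page_flags itself.
	 */
	do {
		new = (old & PGMT_CLEAR_MASK) | memtype_flags;
	} while (!atomic_compare_exchange_weak(&page_flags, &old, new));
}

int main(void)
{
	set_memtype_bits(PGMT_WC);
	printf("flags = %#lx\n",
	       atomic_load_explicit(&page_flags, memory_order_relaxed));
	return 0;
}

The weak variant may fail spuriously, which is harmless inside a retry loop and matches how try_cmpxchg() is used here.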
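The untrack_pfn() change threads a new mm_wr_locked argument down so that VM_PAT is cleared with vm_flags_clear() when the caller holds the mmap write lock, and with the unchecked __vm_flags_mod() when it does not. A toy model of that contract, with the lock-assertion semantics assumed from the pattern rather than quoted from the kernel headers:

#include <assert.h>
#include <stdbool.h>

/* Toy model of the vm_flags helpers (assumed semantics): the named
 * helper documents the locking rule by asserting it, while the
 * double-underscore variant trusts the caller. */
struct toy_vma {
	unsigned long vm_flags;
	bool mm_write_locked;	/* stand-in for mmap_assert_write_locked() */
};

static void __toy_flags_mod(struct toy_vma *vma,
			    unsigned long set, unsigned long clear)
{
	vma->vm_flags = (vma->vm_flags | set) & ~clear;
}

static void toy_flags_clear(struct toy_vma *vma, unsigned long clear)
{
	assert(vma->mm_write_locked);	/* locking rule enforced here */
	__toy_flags_mod(vma, 0, clear);
}

int main(void)
{
	struct toy_vma vma = { .vm_flags = 0x1, .mm_write_locked = true };

	toy_flags_clear(&vma, 0x1);	/* ok: lock held, assertion passes */
	return 0;
}

Splitting the API this way keeps the common paths self-checking while still giving lock-free teardown paths a deliberate, greppable escape hatch.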