From 441a627806873c1b63d06dea4391e79c88b8e496 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 21 May 2019 09:05:03 +0530
Subject: arm64/hugetlb: Use macros for contiguous huge page sizes

Replace all open-coded contiguous huge page size computations with the
available macros CONT_PTE_SIZE and CONT_PMD_SIZE. These macros are already
used elsewhere in the file, so this change makes the file use the same
mnemonics consistently.

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Steve Capper
Cc: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/hugetlbpage.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f475e54fbc43..bbeb6a5a6ba6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -228,7 +228,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 
 	if (sz == PUD_SIZE) {
 		ptep = (pte_t *)pudp;
-	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
+	} else if (sz == (CONT_PTE_SIZE)) {
 		pmdp = pmd_alloc(mm, pudp, addr);
 
 		WARN_ON(addr & (sz - 1));
@@ -246,7 +246,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			ptep = huge_pmd_share(mm, addr, pudp);
 		else
 			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
-	} else if (sz == (PMD_SIZE * CONT_PMDS)) {
+	} else if (sz == (CONT_PMD_SIZE)) {
 		pmdp = pmd_alloc(mm, pudp, addr);
 		WARN_ON(addr & (sz - 1));
 		return (pte_t *)pmdp;
@@ -454,9 +454,9 @@ static int __init hugetlbpage_init(void)
 #ifdef CONFIG_ARM64_4K_PAGES
 	add_huge_page_size(PUD_SIZE);
 #endif
-	add_huge_page_size(PMD_SIZE * CONT_PMDS);
+	add_huge_page_size(CONT_PMD_SIZE);
 	add_huge_page_size(PMD_SIZE);
-	add_huge_page_size(PAGE_SIZE * CONT_PTES);
+	add_huge_page_size(CONT_PTE_SIZE);
 
 	return 0;
 }
@@ -470,9 +470,9 @@ static __init int setup_hugepagesz(char *opt)
 #ifdef CONFIG_ARM64_4K_PAGES
 	case PUD_SIZE:
 #endif
-	case PMD_SIZE * CONT_PMDS:
+	case CONT_PMD_SIZE:
 	case PMD_SIZE:
-	case PAGE_SIZE * CONT_PTES:
+	case CONT_PTE_SIZE:
 		add_huge_page_size(ps);
 		return 1;
 	}
-- cgit
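
For reference, the contiguous-size macros used above come from
arch/arm64/include/asm/pgtable-hwdef.h and expand roughly as follows; the
sketch below assumes the 4K-granule configuration and is illustrative
rather than the literal kernel source:

    /* illustrative sketch only -- see pgtable-hwdef.h for the real values */
    #define CONT_PTES	16				/* 16 * 4K = 64K */
    #define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)
    #define CONT_PMDS	16				/* 16 * 2M = 32M */
    #define CONT_PMD_SIZE	(CONT_PMDS * PMD_SIZE)
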
From f7f0097af67c3c119f6dc7046234630e77f4877e Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 27 May 2019 09:28:15 +0530
Subject: arm64/mm: Simplify protection flag creation for kernel huge mappings

Even though they have the same value, PMD_TYPE_SECT and PUD_TYPE_SECT are
what get used for kernel huge mappings, but only after the table bit has
first been cleared with the leaf-level PTE_TABLE_BIT. Although this is
functionally equivalent, we should use the page-table-level-specific macros
to stay consistent with the MMU specification. Create level-specific
wrappers for kernel huge mapping entries and drop mk_sect_prot(), which has
no other user.

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a1bfc4413982..22c2e4f0768f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -971,9 +971,7 @@ int __init arch_ioremap_pmd_supported(void)
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
-	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
-				      pgprot_val(mk_sect_prot(prot)));
-	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
+	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
 
 	/* Only allow permission changes for now */
 	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
@@ -987,9 +985,7 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 
 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
-	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
-				      pgprot_val(mk_sect_prot(prot)));
-	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
+	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
 
 	/* Only allow permission changes for now */
 	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
-- cgit

From 0c1f14ed12262f45a3af1d588e4d7bd12438b8f5 Mon Sep 17 00:00:00 2001
From: Miles Chen
Date: Wed, 29 May 2019 00:08:20 +0800
Subject: arm64: mm: make CONFIG_ZONE_DMA32 configurable

This change makes CONFIG_ZONE_DMA32 default to y and allows users to
override it only when CONFIG_EXPERT=y.

For SoCs that do not need CONFIG_ZONE_DMA32, this is the first step towards
managing all available memory in a single zone (the normal zone), reducing
the overhead of multiple zones.

The change also fixes a build error when CONFIG_NUMA=y and
CONFIG_ZONE_DMA32=n:

arch/arm64/mm/init.c:195:17: error: use of undeclared identifier 'ZONE_DMA32'
                max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());

Changes since v1:
1. only expose CONFIG_ZONE_DMA32 when CONFIG_EXPERT=y
2. remove redundant IS_ENABLED(CONFIG_ZONE_DMA32)

Cc: Robin Murphy
Signed-off-by: Miles Chen
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d2adffb81b5d..f643bd45ff69 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -191,8 +191,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
-		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#endif
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
-- cgit
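
The Kconfig side of this change is not visible here, since this view is
limited to arch/arm64/mm; presumably it amounts to something along these
lines in arch/arm64/Kconfig (a sketch, not the verbatim hunk):

    config ZONE_DMA32
    	bool "Support DMA32 zone" if EXPERT
    	default y
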
From 87dedf7c61ab07d7fe53bcf93103d2d845d804d8 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 27 May 2019 12:33:29 +0530
Subject: arm64/mm: Change BUG_ON() to VM_BUG_ON() in [pmd|pud]_set_huge()

These functions have no callers that pass unaligned physical addresses, so
change the BUG_ON() checks into VM_BUG_ON(), which is compiled out unless
CONFIG_DEBUG_VM is enabled.

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Mark Rutland
Cc: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 22c2e4f0768f..69e65b6585e6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -978,7 +978,7 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 				   pud_val(new_pud)))
 		return 0;
 
-	BUG_ON(phys & ~PUD_MASK);
+	VM_BUG_ON(phys & ~PUD_MASK);
 	set_pud(pudp, new_pud);
 	return 1;
 }
@@ -992,7 +992,7 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 				   pmd_val(new_pmd)))
 		return 0;
 
-	BUG_ON(phys & ~PMD_MASK);
+	VM_BUG_ON(phys & ~PMD_MASK);
 	set_pmd(pmdp, new_pmd);
 	return 1;
 }
-- cgit

From 01de1776f62e6437ccd207181ec503e8a56e6128 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Sun, 5 May 2019 09:45:12 +0530
Subject: arm64/mm: Identify user instruction aborts

We don't currently set the FAULT_FLAG_INSTRUCTION mm flag for EL0
instruction aborts. This has no functional impact, as we don't override
arch_vma_access_permitted(), and the default implementation always returns
true. However, it would be helpful to provide the flag so that it can be
consumed by tracepoints such as dax_pmd_fault.

This patch sets the FAULT_FLAG_INSTRUCTION flag for EL0 instruction aborts.

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a30818ed9c60..392386a693fe 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -464,6 +464,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	if (is_el0_instruction_abort(esr)) {
 		vm_flags = VM_EXEC;
+		mm_flags |= FAULT_FLAG_INSTRUCTION;
 	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
-- cgit
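
For context on how the new flag is consumed: the generic fault path in
mm/memory.c checks it via arch_vma_access_permitted(), roughly as sketched
below (simplified from the same kernel era, not quoted verbatim):

    /* sketch of the generic consumer in handle_mm_fault() */
    if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
    			       flags & FAULT_FLAG_INSTRUCTION,
    			       flags & FAULT_FLAG_REMOTE))
    	return VM_FAULT_SIGSEGV;

Since arm64 does not override arch_vma_access_permitted(), the default
returns true and behaviour is unchanged; the flag remains visible to
tracepoints such as dax_pmd_fault.
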
From a0509313d5dea0a27a5968f04bd556d05e6349fd Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 3 Jun 2019 12:11:22 +0530
Subject: arm64/mm: Drop mmap_sem before calling __do_kernel_fault()

There is an inconsistency between the down_read_trylock() success and
failure paths when handling a kernel access to a non-exception-table area,
where __do_kernel_fault() gets called. On failure we bail out without
holding mmap_sem, but on success we call it while still holding mmap_sem.
Fix this inconsistency by dropping mmap_sem in the success path as well.

__do_kernel_fault() calls die_kernel_fault(), which in turn calls
show_pte(). show_pte() on this path may become a bit less reliable without
mmap_sem held, but there are already instances [1] in do_page_fault() where
die_kernel_fault() gets called without holding mmap_sem. show_pte() can be
made more robust independently, in a later patch.

[1] Conditional block for (is_ttbr0_addr && is_el1_permission_fault)

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Mark Rutland
Cc: James Morse
Cc: Andrey Konovalov
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 392386a693fe..2256a1a09f1b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -504,8 +504,10 @@ retry:
 		 */
 		might_sleep();
 #ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
+		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
+			up_read(&mm->mmap_sem);
 			goto no_context;
+		}
 #endif
 	}
-- cgit

From 616810360043183a9db73e39f5aadbe48952c383 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 3 Jun 2019 12:11:23 +0530
Subject: arm64/mm: Drop task_struct argument from __do_page_fault()

The task_struct argument is not used in __do_page_fault(). Drop it and use
current or current->mm instead wherever required. This does not change any
functionality.

Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: Mark Rutland
Cc: James Morse
Cc: Andrey Konovalov
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2256a1a09f1b..7c1c8f435f86 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -395,8 +395,7 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADACCESS	0x020000
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int mm_flags, unsigned long vm_flags,
-			   struct task_struct *tsk)
+			   unsigned int mm_flags, unsigned long vm_flags)
 {
 	struct vm_area_struct *vma;
 	vm_fault_t fault;
@@ -440,8 +439,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 			   struct pt_regs *regs)
 {
 	const struct fault_info *inf;
-	struct task_struct *tsk;
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
 	unsigned long vm_flags = VM_READ | VM_WRITE;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -449,9 +447,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (notify_page_fault(regs, esr))
 		return 0;
 
-	tsk = current;
-	mm = tsk->mm;
-
 	/*
 	 * If we're in an interrupt or have no user context, we must not take
 	 * the fault.
@@ -511,7 +506,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	if (fault & VM_FAULT_RETRY) {
@@ -551,11 +546,11 @@ retry:
 	 * that point.
 	 */
 	if (major) {
-		tsk->maj_flt++;
+		current->maj_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
 			      addr);
 	} else {
-		tsk->min_flt++;
+		current->min_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
 			      addr);
 	}
-- cgit
From 8e01076afd97521d992e13fb89fb59a6e48fbeec Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Fri, 7 Jun 2019 01:49:10 +0200
Subject: arm64: Fix comment after #endif

The config value used in the #if was changed in
b433dce056d3814dc4b33e5a8a533d6401ffcfb0, but the comment on the
corresponding #endif was not updated.

Signed-off-by: Odin Ugedal
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 69e65b6585e6..21f3931c73fd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -776,7 +776,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
 	return 0;
 }
-#endif	/* CONFIG_ARM64_64K_PAGES */
+#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
 void vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
-- cgit

From c49bd02f4c7412f0182252ae2ef6e916ca4ff359 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Fri, 7 Jun 2019 14:43:05 +0530
Subject: arm64/mm: Document write abort detection from ESR

This patch adds an is_write_abort() wrapper and documents how the abort
type is detected for cache maintenance operations.

Cc: Will Deacon
Cc: James Morse
Cc: Andrey Konovalov
Acked-by: Mark Rutland
Signed-off-by: Anshuman Khandual
[catalin.marinas@arm.com: only keep the is_write_abort() wrapper]
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7c1c8f435f86..765b5eb601ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -435,6 +435,15 @@ static bool is_el0_instruction_abort(unsigned int esr)
 	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
 }
 
+/*
+ * Note: not valid for EL1 DC IVAC, but we never use that such that it
+ * should fault. EL0 cannot issue DC IVAC (undef).
+ */
+static bool is_write_abort(unsigned int esr)
+{
+	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
+}
+
 static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 				   struct pt_regs *regs)
 {
@@ -460,7 +469,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (is_el0_instruction_abort(esr)) {
 		vm_flags = VM_EXEC;
 		mm_flags |= FAULT_FLAG_INSTRUCTION;
-	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+	} else if (is_write_abort(esr)) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
-- cgit
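
A short note on the two ESR bits tested by the wrapper above: ESR_ELx_WNR
("write, not read") is set for write accesses, but the architecture also
sets it for aborts taken on cache maintenance operations, which are
additionally flagged by ESR_ELx_CM. Masking out the CM case means a
maintenance instruction faulting on, say, a read-only mapping is not
misreported as a write fault. A hypothetical illustration of the resulting
classification (for exposition only, not kernel code):

    /* hypothetical helper showing how the predicates partition aborts */
    static const char *classify_abort(unsigned int esr)
    {
    	if (is_el0_instruction_abort(esr))
    		return "exec";
    	if (is_write_abort(esr))	/* WNR set and CM clear */
    		return "write";
    	return "read";			/* plain reads and cache maintenance */
    }
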
From 4745224b45097d333358bce298aea2137246183c Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Fri, 7 Jun 2019 14:43:06 +0530
Subject: arm64/mm: Refactor __do_page_fault()

__do_page_fault() is overcomplicated by multiple goto statements. Clean up
the code flow and, while there, drop the local vm_fault_t variable.

Reviewed-by: Mark Rutland
Signed-off-by: Anshuman Khandual
Cc: Will Deacon
Cc: James Morse
Cc: Andrey Konovalov
Cc: Christoph Hellwig
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 30 +++++++++++-------------------
 1 file changed, 11 insertions(+), 19 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 765b5eb601ca..582061dec89f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -397,37 +397,29 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 			   unsigned int mm_flags, unsigned long vm_flags)
 {
-	struct vm_area_struct *vma;
-	vm_fault_t fault;
+	struct vm_area_struct *vma = find_vma(mm, addr);
 
-	vma = find_vma(mm, addr);
-	fault = VM_FAULT_BADMAP;
 	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > addr))
-		goto check_stack;
+		return VM_FAULT_BADMAP;
 
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
 	 */
-good_area:
+	if (unlikely(vma->vm_start > addr)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			return VM_FAULT_BADMAP;
+		if (expand_stack(vma, addr))
+			return VM_FAULT_BADMAP;
+	}
+
 	/*
 	 * Check that the permissions on the VMA allow for the fault which
 	 * occurred.
 	 */
-	if (!(vma->vm_flags & vm_flags)) {
-		fault = VM_FAULT_BADACCESS;
-		goto out;
-	}
-
+	if (!(vma->vm_flags & vm_flags))
+		return VM_FAULT_BADACCESS;
 	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
-
-check_stack:
-	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
-		goto good_area;
-out:
-	return fault;
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
-- cgit
From 8f5c9037a55b22e847f636f9a39fa98fe67923d1 Mon Sep 17 00:00:00 2001
From: Masayoshi Mizuma
Date: Fri, 14 Jun 2019 09:11:41 -0400
Subject: arm64/mm: Correct the cache line size warning with non coherent device

If the cache line size is greater than ARCH_DMA_MINALIGN (128), a warning
is emitted and the kernel is tainted with TAINT_CPU_OUT_OF_SPEC. However,
this is overly strict: as discussed in the thread [1], the CPU cache line
size is only a problem for non-coherent devices.

Since the coherent flag has already been introduced to struct device, show
the warning only if the device is non-coherent and ARCH_DMA_MINALIGN is
smaller than the CPU cache line size.

[1] https://lore.kernel.org/linux-arm-kernel/20180514145703.celnlobzn3uh5tc2@localhost/

Signed-off-by: Masayoshi Mizuma
Reviewed-by: Hidetoshi Seto
Tested-by: Zhang Lei
[catalin.marinas@arm.com: removed 'if' block for WARN_TAINT]
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/dma-mapping.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 674860e3e478..ff410195dc1c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -91,10 +91,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
 
 static int __init arm64_dma_init(void)
 {
-	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
-		   TAINT_CPU_OUT_OF_SPEC,
-		   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
-		   ARCH_DMA_MINALIGN, cache_line_size());
 	return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
 }
 arch_initcall(arm64_dma_init);
@@ -472,6 +468,14 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
+	int cls = cache_line_size_of_cpu();
+
+	WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
+		   TAINT_CPU_OUT_OF_SPEC,
+		   "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
+		   dev_driver_string(dev), dev_name(dev),
+		   ARCH_DMA_MINALIGN, cls);
+
 	dev->dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
-- cgit
From 4739d53fcd1df8a9f6f72bb02a3a1d852ad252b3 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 23 May 2019 11:22:54 +0100
Subject: arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP

Wire up the special helper functions to manipulate aliases of vmalloc
regions in the linear map.

Acked-by: Will Deacon
Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/pageattr.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 40 insertions(+), 8 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 6cd645edcf35..9c6b9039ec8f 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
 					__pgprot(PTE_VALID));
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(0),
+		.clear_mask = __pgprot(PTE_VALID),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+		.clear_mask = __pgprot(PTE_RDONLY),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return;
+
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
-#ifdef CONFIG_HIBERNATION
+
 /*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
  *
  * Because this is only called on the kernel linear map, p?d_sect() implies
  * p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
 	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);
 
+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return true;
+
 	pgdp = pgd_offset_k(addr);
 	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;
@@ -204,5 +238,3 @@ bool kernel_page_present(struct page *page)
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-- cgit
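
These two hooks are what CONFIG_ARCH_HAS_SET_DIRECT_MAP wires into place:
the generic vmalloc code uses them to drop and later restore the linear-map
alias of pages whose permissions were changed, for example when freeing a
VM_FLUSH_RESET_PERMS area. Roughly, the core code drives them along the
following lines (simplified sketch of the generic caller, not arm64 code):

    /* simplified sketch of how mm/vmalloc.c drives the hooks */
    static void set_area_direct_map(const struct vm_struct *area,
    				int (*set_direct_map)(struct page *page))
    {
    	int i;

    	for (i = 0; i < area->nr_pages; i++)
    		if (page_address(area->pages[i]))
    			set_direct_map(area->pages[i]);
    }

    /* on free: invalidate the aliases, flush the TLB, then restore them */
    set_area_direct_map(area, set_direct_map_invalid_noflush);
    ...
    set_area_direct_map(area, set_direct_map_default_noflush);

Note that both arm64 implementations are no-ops unless rodata_full is set:
without rodata_full the linear map may use block mappings, which cannot be
changed one page at a time.
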