Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r-- | arch/arm/mm/ioremap.c | 49
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 2129070065c3..27e64f782cb3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
@@ -110,21 +111,45 @@ void __init add_static_vm_early(struct static_vm *svm)
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
-				  __pgprot(mtype->prot_pte));
+	return vmap_page_range(virt, virt + PAGE_SIZE, phys,
+			       __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
 
+#ifdef CONFIG_KASAN
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return (unsigned long)kasan_mem_to_shadow((void *)addr);
+}
+#else
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
+		       unsigned long end)
+{
+	end = ALIGN(end, PGDIR_SIZE);
+	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	int seq;
 
 	do {
-		seq = atomic_read(&init_mm.context.vmalloc_seq);
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+		seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
+		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
+		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+			unsigned long start =
+				arm_kasan_mem_to_shadow(VMALLOC_START);
+			unsigned long end =
+				arm_kasan_mem_to_shadow(VMALLOC_END);
+			memcpy_pgd(mm, start, end);
+		}
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the
@@ -411,7 +436,7 @@ void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
 	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
 }
 
-void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
 {
 	return (__force void *)arch_ioremap_caller(phys_addr, size,
 						   MT_MEMORY_RW,
@@ -466,8 +491,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
-				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
+	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+			       __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
 
@@ -490,7 +515,5 @@ void __init early_ioremap_init(void)
 bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
 				 unsigned long flags)
 {
-	unsigned long pfn = PHYS_PFN(offset);
-
-	return memblock_is_map_memory(pfn);
+	return memblock_is_map_memory(offset);
 }
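
For context, the paths touched above (ioremap_page()/arch_ioremap_caller() and arch_memremap_wb()) sit behind the generic ioremap()/memremap() interfaces that drivers call; the switch to vmap_page_range() and the KASAN shadow synchronization are internal to how those mappings are installed and do not change the caller-side view. The fragment below is a minimal, hypothetical sketch of that caller side: the EXAMPLE_* constants and example_touch_regs() are made up for illustration and are not part of this change.

/*
 * Hypothetical driver fragment, for illustration only.  On ARM the
 * ioremap() call below ends up in the arch_ioremap_caller() path
 * modified in the diff above.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_REG_BASE	0x40001000UL	/* made-up MMIO base address */
#define EXAMPLE_REG_SIZE	0x1000		/* one page of registers     */

static int example_touch_regs(void)
{
	void __iomem *regs;

	/* Map the device registers into the kernel's ioremap/vmalloc area. */
	regs = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Use the MMIO accessors rather than plain pointer dereferences. */
	writel(0x1, regs + 0x0);
	(void)readl(regs + 0x4);

	iounmap(regs);
	return 0;
}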