author     Will Deacon <will@kernel.org>    2019-08-13 15:52:23 +0100
committer  Will Deacon <will@kernel.org>    2019-08-14 13:00:57 +0100
commit     68dd8ef321626f14ae9ef2039b7a03c707149489 (patch)
tree       28f9129443d076c4ab76bd1e878110ed3c4791a2 /arch/arm64/include/asm/memory.h
parent     d2d73d2fef421ca0d447946cc430fdf5c4c5b06a (diff)
arm64: memory: Fix virt_addr_valid() using __is_lm_address()
virt_addr_valid() is intended to test whether or not the passed address is a valid linear map address. Unfortunately, it relies on _virt_addr_is_linear(), which is broken because it assumes the linear map is at the top of the address space, which it no longer is.

Reimplement virt_addr_valid() using __is_lm_address() and remove _virt_addr_is_linear() entirely. At the same time, ensure we evaluate the macro parameter only once and move it within the __ASSEMBLY__ block.

Reported-by: Qian Cai <cai@lca.pw>
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Fixes: 14c127c957c1 ("arm64: mm: Flip kernel VA space")
Signed-off-by: Will Deacon <will@kernel.org>
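For readers unfamiliar with the idiom, the new virt_addr_valid() is a GNU C statement expression using __typeof__, so its argument is evaluated exactly once even though the result is used twice. The sketch below (builds with GCC or Clang) mirrors that shape outside the kernel; fake_is_lm_address() and fake_pfn_valid() are made-up stand-ins for illustration, not the kernel helpers.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Made-up stand-ins for the kernel helpers; illustration only. */
static bool fake_is_lm_address(uint64_t addr)
{
    return !(addr & (1ULL << 47));  /* "top bit clear => linear map" in this sketch */
}

static bool fake_pfn_valid(uint64_t addr)
{
    return addr != 0;               /* placeholder validity check */
}

/* Same shape as the new virt_addr_valid(): the argument is evaluated once. */
#define fake_virt_addr_valid(addr) ({                       \
    __typeof__(addr) __addr = (addr);                       \
    fake_is_lm_address((uint64_t)__addr) &&                 \
        fake_pfn_valid((uint64_t)__addr);                   \
})

int main(void)
{
    uint64_t next = 0x1000;
    bool ok = fake_virt_addr_valid(next++); /* next++ runs exactly once */

    printf("valid=%d next=%#llx\n", ok, (unsigned long long)next);
    return 0;
}

Passing an expression with a side effect (next++ above) shows why the single evaluation matters: the removed two-macro form evaluated kaddr twice, once in _virt_addr_is_linear() and again in virt_to_pfn().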
Diffstat (limited to 'arch/arm64/include/asm/memory.h')
-rw-r--r--    arch/arm64/include/asm/memory.h    16
1 file changed, 8 insertions, 8 deletions
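As background for the __is_lm_address() hunk below: with the kernel VA space flipped, the linear map occupies the lower half of the address space, so an address belongs to it when bit (vabits_actual - 1) is clear. The user-space sketch below only illustrates that bit test; vabits_actual is stubbed to 48 and BIT() is written out by hand, since the kernel's definitions are not available here.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)  (1ULL << (n))

/* Stand-in: the real kernel determines vabits_actual at early boot. */
static unsigned int vabits_actual = 48;

/* User-space rendition of the top-bit test; not the kernel macro itself. */
#define is_lm_address(addr) (!(((uint64_t)(addr)) & BIT(vabits_actual - 1)))

int main(void)
{
    uint64_t linear = 0x0000123456789abcULL; /* bit 47 clear: lower half */
    uint64_t kimage = 0xffff800011223344ULL; /* bit 47 set:   upper half */

    printf("linear half? %d\n", (int)is_lm_address(linear)); /* prints 1 */
    printf("kernel half? %d\n", (int)is_lm_address(kimage)); /* prints 0 */
    return 0;
}

Compiled with a plain C compiler this prints 1 for the low-half address and 0 for the high-half one, matching the "top bit clear means linear map" check in the patch.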
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 2c3c4b145e95..93ef8e5c6971 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -242,11 +242,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 /*
- * The linear kernel range starts in the middle of the virtual adddress
+ * The linear kernel range starts at the bottom of the virtual address
  * space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * sufficient check and avoids having to worry about the tag.
  */
-#define __is_lm_address(addr) (!((addr) & BIT(vabits_actual - 1)))
+#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
 #define __lm_to_phys(addr) (((addr) + physvirt_offset))
 #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
@@ -326,13 +326,13 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START))
 #endif
-#endif
-#define _virt_addr_is_linear(kaddr) \
- (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(addr) ({ \
+ __typeof__(addr) __addr = addr; \
+ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+})
-#define virt_addr_valid(kaddr) \
- (_virt_addr_is_linear(kaddr) && pfn_valid(virt_to_pfn(kaddr)))
+#endif
 /*
  * Given that the GIC architecture permits ITS implementations that can only be