Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--  arch/riscv/include/asm/page.h       21
-rw-r--r--  arch/riscv/include/asm/processor.h   8
-rw-r--r--  arch/riscv/include/asm/vdso.h       23
3 files changed, 24 insertions(+), 28 deletions(-)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index b0ca5058e7ae..109c97e991a6 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -79,8 +79,8 @@ typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_MMU
-extern unsigned long pfn_base;
-#define ARCH_PFN_OFFSET (pfn_base)
+extern unsigned long riscv_pfn_base;
+#define ARCH_PFN_OFFSET (riscv_pfn_base)
#else
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
@@ -91,10 +91,8 @@ struct kernel_mapping {
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
-#endif
unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
uintptr_t xiprom;
@@ -105,11 +103,11 @@ struct kernel_mapping {
extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
-#ifdef CONFIG_64BIT
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
@@ -123,7 +121,7 @@ extern phys_addr_t phys_ram_base;
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = y; \
- (_y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
@@ -133,15 +131,6 @@ extern phys_addr_t phys_ram_base;
is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
-#else
-#define is_kernel_mapping(x) \
- ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
-#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET)
-
-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
-#endif /* CONFIG_64BIT */
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
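The page.h hunks above collapse the separate 32-bit and 64-bit definitions of is_linear_mapping(), kernel_mapping_va_to_pa() and friends into single versions guarded by IS_ENABLED(). Because IS_ENABLED(CONFIG_xxx) expands to a compile-time constant 0 or 1, the compiler discards the branch that cannot apply, so the unified macros generate the same code the per-width variants did. A minimal stand-alone sketch of that folding pattern follows; the *_DEMO names and address constants are illustrative and not taken from the kernel.

#include <stdio.h>

/* Stand-in for the Kconfig machinery: in the kernel, IS_ENABLED()
 * expands to a constant 0 or 1 based on the generated config headers. */
#define CONFIG_64BIT_DEMO	1
#define IS_ENABLED_DEMO(option)	(option)

/* Illustrative layout constants, not the real RISC-V values. */
#define PAGE_OFFSET_DEMO	0xc0000000UL
#define KERNEL_VIRT_DEMO	0xe0000000UL

/* Same shape as the unified is_linear_mapping(): when the "64-bit"
 * option is off, the second operand folds to true and the upper-bound
 * comparison vanishes at compile time. */
#define is_linear_mapping_demo(x) \
	((x) >= PAGE_OFFSET_DEMO && \
	 (!IS_ENABLED_DEMO(CONFIG_64BIT_DEMO) || (x) < KERNEL_VIRT_DEMO))

int main(void)
{
	unsigned long va = 0xc1000000UL;

	printf("0x%lx in linear map: %d\n", va, is_linear_mapping_demo(va));
	return 0;
}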
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 021ed64ee608..46b492c78cbb 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -37,6 +37,14 @@ struct thread_struct {
unsigned long bad_cause;
};
+/* Whitelist the fstate from the task_struct for hardened usercopy */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = offsetof(struct thread_struct, fstate);
+ *size = sizeof_field(struct thread_struct, fstate);
+}
+
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long)&init_stack, \
}
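The new arch_thread_struct_whitelist() describes the only slice of thread_struct, the FP state, that the hardened usercopy checks should allow to be copied to or from user space; the generic fork code adds the offset of thread inside task_struct and feeds the resulting window to kmem_cache_create_usercopy() for the task_struct slab cache. A rough stand-alone sketch of the offset/size computation follows; the struct members below are simplified placeholders, not the real asm/processor.h layout.

#include <stdio.h>
#include <stddef.h>

/* sizeof_field() as defined in the kernel: size of one named member. */
#define sizeof_field(TYPE, MEMBER)	sizeof(((TYPE *)0)->MEMBER)

/* Simplified placeholders for the real structures in asm/processor.h. */
struct fregs_demo { unsigned long f[32]; unsigned int fcsr; };

struct thread_struct_demo {
	unsigned long ra, sp, s[12];
	struct fregs_demo fstate;	/* only this region is whitelisted */
	unsigned long bad_cause;
};

/* Mirrors arch_thread_struct_whitelist(): report where the
 * user-copyable region starts and how large it is. */
static void whitelist_demo(unsigned long *offset, unsigned long *size)
{
	*offset = offsetof(struct thread_struct_demo, fstate);
	*size = sizeof_field(struct thread_struct_demo, fstate);
}

int main(void)
{
	unsigned long off, sz;

	whitelist_demo(&off, &sz);
	printf("usercopy window: offset %lu, size %lu bytes\n", off, sz);
	return 0;
}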
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 1453a2f563bc..893e47195e30 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -8,26 +8,25 @@
#ifndef _ASM_RISCV_VDSO_H
#define _ASM_RISCV_VDSO_H
+
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.
+ */
+#ifdef CONFIG_MMU
+
#include <linux/types.h>
+#include <generated/vdso-offsets.h>
#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif
-/*
- * The VDSO symbols are mapped into Linux so we can just use regular symbol
- * addressing to get their offsets in userspace. The symbols are mapped at an
- * offset of 0, but since the linker must support setting weak undefined
- * symbols to the absolute address 0 it also happens to support other low
- * addresses even when the code model suggests those low addresses would not
- * otherwise be availiable.
- */
#define VDSO_SYMBOL(base, name) \
-({ \
- extern const char __vdso_##name[]; \
- (void __user *)((unsigned long)(base) + __vdso_##name); \
-})
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#endif /* CONFIG_MMU */
asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
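The VDSO_SYMBOL() rewrite stops relying on weak __vdso_* symbols being resolvable at low absolute addresses and instead uses per-symbol offsets generated at build time into <generated/vdso-offsets.h>; the user-space address is then just the mmap base of the vDSO plus that constant. A hedged stand-alone sketch of the resulting address arithmetic follows; the offset value and vdso_base below are made up for illustration.

#include <stdio.h>

/* In the kernel this comes from <generated/vdso-offsets.h>, produced by
 * scanning the built vDSO image; the value here is purely illustrative. */
#define __vdso_rt_sigreturn_offset	0x800UL

/* Same shape as the new VDSO_SYMBOL(): base of the mapped vDSO image
 * plus the symbol's fixed offset inside that image. */
#define VDSO_SYMBOL_DEMO(base, name) \
	((void *)((unsigned long)(base) + __vdso_##name##_offset))

int main(void)
{
	/* Pretend the vDSO was mapped at this address for the process. */
	void *vdso_base = (void *)0x70000000UL;

	printf("rt_sigreturn trampoline at %p\n",
	       VDSO_SYMBOL_DEMO(vdso_base, rt_sigreturn));
	return 0;
}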