Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kbuild | 1
-rw-r--r--  arch/riscv/Kconfig | 80
-rw-r--r--  arch/riscv/Kconfig.socs | 34
-rw-r--r--  arch/riscv/boot/dts/Makefile | 2
-rw-r--r--  arch/riscv/boot/dts/kendryte/Makefile | 4
-rw-r--r--  arch/riscv/configs/nommu_k210_defconfig | 7
-rw-r--r--  arch/riscv/include/asm/cacheflush.h | 65
-rw-r--r--  arch/riscv/include/asm/cacheinfo.h | 15
-rw-r--r--  arch/riscv/include/asm/clocksource.h | 7
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 8
-rw-r--r--  arch/riscv/include/asm/csr.h | 3
-rw-r--r--  arch/riscv/include/asm/fixmap.h | 2
-rw-r--r--  arch/riscv/include/asm/gdb_xml.h | 117
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 10
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 22
-rw-r--r--  arch/riscv/include/asm/io.h | 2
-rw-r--r--  arch/riscv/include/asm/irq.h | 5
-rw-r--r--  arch/riscv/include/asm/kasan.h | 2
-rw-r--r--  arch/riscv/include/asm/kdebug.h | 12
-rw-r--r--  arch/riscv/include/asm/kgdb.h | 112
-rw-r--r--  arch/riscv/include/asm/mmio.h | 2
-rw-r--r--  arch/riscv/include/asm/mmiowb.h | 1
-rw-r--r--  arch/riscv/include/asm/module.h | 2
-rw-r--r--  arch/riscv/include/asm/parse_asm.h | 219
-rw-r--r--  arch/riscv/include/asm/patch.h | 4
-rw-r--r--  arch/riscv/include/asm/perf_event.h | 8
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 7
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 27
-rw-r--r--  arch/riscv/include/asm/processor.h | 13
-rw-r--r--  arch/riscv/include/asm/ptdump.h | 11
-rw-r--r--  arch/riscv/include/asm/set_memory.h | 8
-rw-r--r--  arch/riscv/include/asm/smp.h | 3
-rw-r--r--  arch/riscv/include/asm/soc.h | 39
-rw-r--r--  arch/riscv/include/asm/vdso.h | 2
-rw-r--r--  arch/riscv/include/asm/vdso/clocksource.h | 8
-rw-r--r--  arch/riscv/include/asm/vdso/gettimeofday.h | 79
-rw-r--r--  arch/riscv/include/asm/vdso/processor.h | 19
-rw-r--r--  arch/riscv/include/asm/vdso/vsyscall.h | 27
-rw-r--r--  arch/riscv/include/asm/vermagic.h | 9
-rw-r--r--  arch/riscv/kernel/Makefile | 3
-rw-r--r--  arch/riscv/kernel/cacheinfo.c | 17
-rw-r--r--  arch/riscv/kernel/cpu.c | 16
-rw-r--r--  arch/riscv/kernel/cpu_ops.c | 4
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 83
-rw-r--r--  arch/riscv/kernel/entry.S | 4
-rw-r--r--  arch/riscv/kernel/ftrace.c | 18
-rw-r--r--  arch/riscv/kernel/head.S | 11
-rw-r--r--  arch/riscv/kernel/irq.c | 33
-rw-r--r--  arch/riscv/kernel/kgdb.c | 390
-rw-r--r--  arch/riscv/kernel/module.c | 2
-rw-r--r--  arch/riscv/kernel/patch.c | 51
-rw-r--r--  arch/riscv/kernel/perf_event.c | 8
-rw-r--r--  arch/riscv/kernel/process.c | 2
-rw-r--r--  arch/riscv/kernel/sbi.c | 17
-rw-r--r--  arch/riscv/kernel/setup.c | 5
-rw-r--r--  arch/riscv/kernel/smp.c | 13
-rw-r--r--  arch/riscv/kernel/soc.c | 29
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 15
-rw-r--r--  arch/riscv/kernel/sys_riscv.c | 6
-rw-r--r--  arch/riscv/kernel/time.c | 9
-rw-r--r--  arch/riscv/kernel/traps.c | 11
-rw-r--r--  arch/riscv/kernel/vdso.c | 28
-rw-r--r--  arch/riscv/kernel/vdso/Makefile | 20
-rw-r--r--  arch/riscv/kernel/vdso/clock_getres.S | 18
-rw-r--r--  arch/riscv/kernel/vdso/clock_gettime.S | 18
-rw-r--r--  arch/riscv/kernel/vdso/gettimeofday.S | 18
-rw-r--r--  arch/riscv/kernel/vdso/note.S | 12
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S | 2
-rw-r--r--  arch/riscv/kernel/vdso/vgettimeofday.c | 25
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/riscv/mm/cacheflush.c | 1
-rw-r--r--  arch/riscv/mm/fault.c | 14
-rw-r--r--  arch/riscv/mm/hugetlbpage.c | 24
-rw-r--r--  arch/riscv/mm/init.c | 77
-rw-r--r--  arch/riscv/mm/kasan_init.c | 2
-rw-r--r--  arch/riscv/mm/pageattr.c | 20
-rw-r--r--  arch/riscv/mm/ptdump.c | 4
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c | 103
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 64
79 files changed, 1718 insertions(+), 482 deletions(-)
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index d1d0aa70fdf1..4614c01ba5b3 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/ net/
+obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 62f7bfeb709e..128192e14ff2 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -12,62 +12,70 @@ config 32BIT
config RISCV
def_bool y
- select OF
- select OF_EARLY_FLATTREE
- select OF_IRQ
+ select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
+ select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_DIRECT_MAP
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
select COMMON_CLK
+ select EDAC_SUPPORT
+ select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
+ select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IOREMAP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
+ select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_IOREMAP
- select GENERIC_PTDUMP if MMU
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
- select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_COPY_THREAD_TLS
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EBPF_JIT if MMU
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select SPARSE_IRQ
- select SYSCTL_EXCEPTION_TRACE
- select HAVE_ARCH_TRACEHOOK
- select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
- select THREAD_INFO_IN_TASK
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
+ select RISCV_INTC
select RISCV_TIMER
- select GENERIC_IRQ_MULTI_HANDLER
- select GENERIC_ARCH_TOPOLOGY if SMP
- select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MMIOWB
- select ARCH_HAS_DEBUG_VIRTUAL
- select HAVE_EBPF_JIT if MMU
- select EDAC_SUPPORT
- select ARCH_HAS_GIGANTIC_PAGE
- select ARCH_HAS_SET_DIRECT_MAP
- select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX
- select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
- select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select ARCH_HAS_GCOV_PROFILE_ALL
- select HAVE_COPY_THREAD_TLS
- select HAVE_ARCH_KASAN if MMU && 64BIT
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -136,6 +144,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
config SYS_SUPPORTS_HUGETLBFS
+ depends on MMU
def_bool y
config STACKTRACE_SUPPORT
@@ -193,11 +202,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_FUNCTION_TRACER
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select SWIOTLB if MMU
endchoice
@@ -380,6 +389,11 @@ endchoice
endmenu
+config BUILTIN_DTB
+ def_bool n
+ depends on RISCV_M_MODE
+ depends on OF
+
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 216286db81c9..6c88148f1b9b 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -11,23 +11,39 @@ config SOC_SIFIVE
This enables support for SiFive SoC platform hardware.
config SOC_VIRT
- bool "QEMU Virt Machine"
- select POWER_RESET_SYSCON
- select POWER_RESET_SYSCON_POWEROFF
- select GOLDFISH
- select RTC_DRV_GOLDFISH
- select SIFIVE_PLIC
- help
- This enables support for QEMU Virt Machine.
+ bool "QEMU Virt Machine"
+ select POWER_RESET
+ select POWER_RESET_SYSCON
+ select POWER_RESET_SYSCON_POWEROFF
+ select GOLDFISH
+ select RTC_DRV_GOLDFISH if RTC_CLASS
+ select SIFIVE_PLIC
+ help
+ This enables support for QEMU Virt Machine.
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
- select BUILTIN_DTB
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
help
This enables support for Kendryte K210 SoC platform hardware.
+config SOC_KENDRYTE_K210_DTB
+ def_bool y
+ depends on SOC_KENDRYTE_K210_DTB_BUILTIN
+
+config SOC_KENDRYTE_K210_DTB_BUILTIN
+ bool "Builtin device tree for the Kendryte K210"
+ depends on SOC_KENDRYTE
+ default y
+ select OF
+ select BUILTIN_DTB
+ select SOC_KENDRYTE_K210_DTB
+ help
+ Builds a device tree for the Kendryte K210 into the Linux image.
+ This option should be selected if no bootloader is being used.
+ If unsure, say Y.
+
endmenu
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index 557f0b519c8e..ca1f8cbd78c0 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += sifive
subdir-y += kendryte
+
+obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile
index 815444e69e89..1a88e616f18e 100644
--- a/arch/riscv/boot/dts/kendryte/Makefile
+++ b/arch/riscv/boot/dts/kendryte/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_KENDRYTE) += k210.dtb
+dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb
+
+obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 632aa2f95e57..b48138e329ea 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -2,14 +2,12 @@
CONFIG_LOG_BUF_SHIFT=15
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
CONFIG_INITRAMFS_FORCE=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
-# CONFIG_BOOT_CONFIG is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
@@ -35,8 +33,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
CONFIG_CMDLINE_FORCE=y
-CONFIG_USE_BUILTIN_DTB=y
-CONFIG_BUILTIN_DTB_SOURCE="kendryte/k210"
# CONFIG_BLOCK is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
@@ -49,8 +45,8 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
@@ -62,6 +58,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_LSM="[]"
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_MISC is not set
+CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index c8677c75f82c..23ff70350992 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -8,65 +8,6 @@
#include <linux/mm.h>
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-
-/*
- * The cache doesn't need to be flushed when TLB entries change when
- * the cache is mapped to physical memory, not virtual memory
- */
-static inline void flush_cache_all(void)
-{
-}
-
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_dup_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long vmaddr,
- unsigned long pfn)
-{
-}
-
-static inline void flush_dcache_mmap_lock(struct address_space *mapping)
-{
-}
-
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
-{
-}
-
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
-{
-}
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
static inline void local_flush_icache_all(void)
{
asm volatile ("fence.i" ::: "memory");
@@ -79,13 +20,15 @@ static inline void flush_dcache_page(struct page *page)
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
#ifndef CONFIG_SMP
@@ -105,4 +48,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+#include <asm-generic/cacheflush.h>
+
#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h
new file mode 100644
index 000000000000..5d9662e9aba8
--- /dev/null
+++ b/arch/riscv/include/asm/cacheinfo.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_CACHEINFO_H
+#define _ASM_RISCV_CACHEINFO_H
+
+#include <linux/cacheinfo.h>
+
+struct riscv_cacheinfo_ops {
+ const struct attribute_group * (*get_priv_group)(struct cacheinfo
+ *this_leaf);
+};
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
+
+#endif /* _ASM_RISCV_CACHEINFO_H */
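The new hook only does something once a platform registers an ops structure. A minimal registration sketch follows; the driver name, init function and the NULL return are illustrative assumptions, not part of this patch:

#include <linux/cacheinfo.h>
#include <linux/init.h>
#include <asm/cacheinfo.h>

/* Hypothetical per-leaf private attribute group; NULL means "nothing extra". */
static const struct attribute_group *
example_cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static struct riscv_cacheinfo_ops example_cacheinfo_ops = {
	.get_priv_group = example_cache_get_priv_group,
};

static int __init example_cacheinfo_init(void)
{
	riscv_set_cacheinfo_ops(&example_cacheinfo_ops);
	return 0;
}
device_initcall(example_cacheinfo_init);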
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 000000000000..482185566b0c
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index d969bab4a26b..262e5bbb2776 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -179,7 +179,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -224,7 +224,7 @@
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -270,7 +270,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -316,7 +316,7 @@
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
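The added (long) cast matters for 32-bit cmpxchg on RV64: lr.w sign-extends the loaded word into the 64-bit register, so the comparison operand must be sign-extended the same way or a value with bit 31 set would never compare equal. A small user-space sketch of the mismatch the cast avoids (an analogy only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old = 0x80000000u;        /* bit 31 set */
	int64_t loaded = (int32_t)old;     /* what lr.w leaves in the register */
	int64_t operand = old;             /* a zero-extended comparison operand */

	/* 0xffffffff80000000 vs 0x0000000080000000: the bne would always be taken. */
	printf("%llx vs %llx -> %s\n", (unsigned long long)loaded,
	       (unsigned long long)operand, loaded == operand ? "match" : "mismatch");
	return 0;
}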
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 8e18d2c64399..cec462e198ce 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -51,13 +51,10 @@
#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
/* Interrupt causes (minus the high bit) */
-#define IRQ_U_SOFT 0
#define IRQ_S_SOFT 1
#define IRQ_M_SOFT 3
-#define IRQ_U_TIMER 4
#define IRQ_S_TIMER 5
#define IRQ_M_TIMER 7
-#define IRQ_U_EXT 8
#define IRQ_S_EXT 9
#define IRQ_M_EXT 11
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 2368d49eb4ef..1ff075a8dfc7 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_MMU
/*
diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h
new file mode 100644
index 000000000000..041b45f5b997
--- /dev/null
+++ b/arch/riscv/include/asm/gdb_xml.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GDB_XML_H_
+#define __ASM_GDB_XML_H_
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
+static const char riscv_gdb_stub_feature[64] =
+ "PacketSize=800;qXfer:features:read+;";
+
+static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+
+#ifdef CONFIG_64BIT
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-64bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-64bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>"
+"</feature>";
+#else
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-32bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-32bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>"
+"</feature>";
+#endif
+#endif
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 728a5db66597..a5c2ca1d1cd8 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -5,14 +5,4 @@
#include <asm-generic/hugetlb.h>
#include <asm/page.h>
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr,
- unsigned long len) {
- return 0;
-}
-
-static inline void arch_clear_hugepage_flags(struct page *page)
-{
-}
-
#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 1bb0cd04aec3..5ce50468aff1 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -8,6 +8,7 @@
#ifndef _ASM_RISCV_HWCAP_H
#define _ASM_RISCV_HWCAP_H
+#include <linux/bits.h>
#include <uapi/asm/hwcap.h>
#ifndef __ASSEMBLY__
@@ -22,6 +23,27 @@ enum {
};
extern unsigned long elf_hwcap;
+
+#define RISCV_ISA_EXT_a ('a' - 'a')
+#define RISCV_ISA_EXT_c ('c' - 'a')
+#define RISCV_ISA_EXT_d ('d' - 'a')
+#define RISCV_ISA_EXT_f ('f' - 'a')
+#define RISCV_ISA_EXT_h ('h' - 'a')
+#define RISCV_ISA_EXT_i ('i' - 'a')
+#define RISCV_ISA_EXT_m ('m' - 'a')
+#define RISCV_ISA_EXT_s ('s' - 'a')
+#define RISCV_ISA_EXT_u ('u' - 'a')
+
+#define RISCV_ISA_EXT_MAX 64
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
#endif
#endif /* _ASM_RISCV_HWCAP_H */
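A sketch of how the new helper might be consumed by arch code; the wrapper function is illustrative, and passing NULL selects the host ISA bitmap as documented in cpufeature.c later in this patch:

#include <linux/types.h>
#include <asm/hwcap.h>

/* Illustrative helper: does every "okay" hart implement the C extension? */
static bool example_has_compressed(void)
{
	return riscv_isa_extension_available(NULL, c);
}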
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 0f477206a4ed..3835c3295dc5 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -12,8 +12,8 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/mmiowb.h>
-#include <asm/pgtable.h>
/*
* MMIO access functions are separated out to break dependency cycles
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 6e1b0e0325eb..9807ad164015 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -10,11 +10,6 @@
#include <linux/interrupt.h>
#include <linux/linkage.h>
-#define NR_IRQS 0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
#include <asm-generic/irq.h>
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b47045cb85ce..b04028c6218c 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -8,8 +8,6 @@
#ifdef CONFIG_KASAN
-#include <asm/pgtable.h>
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
diff --git a/arch/riscv/include/asm/kdebug.h b/arch/riscv/include/asm/kdebug.h
new file mode 100644
index 000000000000..85ac00411f6e
--- /dev/null
+++ b/arch/riscv/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
new file mode 100644
index 000000000000..8177a457caff
--- /dev/null
+++ b/arch/riscv/include/asm/kgdb.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+
+#define DBG_MAX_REG_NUM (36)
+#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX 2048
+#ifdef CONFIG_RISCV_ISA_C
+#define BREAK_INSTR_SIZE 2
+#else
+#define BREAK_INSTR_SIZE 4
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern int kgdb_has_hit_break(unsigned long addr);
+extern unsigned long kgdb_compiled_break;
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ ".option norvc\n"
+ "kgdb_compiled_break: ebreak\n"
+ ".option rvc\n");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define DBG_REG_ZERO "zero"
+#define DBG_REG_RA "ra"
+#define DBG_REG_SP "sp"
+#define DBG_REG_GP "gp"
+#define DBG_REG_TP "tp"
+#define DBG_REG_T0 "t0"
+#define DBG_REG_T1 "t1"
+#define DBG_REG_T2 "t2"
+#define DBG_REG_FP "fp"
+#define DBG_REG_S1 "s1"
+#define DBG_REG_A0 "a0"
+#define DBG_REG_A1 "a1"
+#define DBG_REG_A2 "a2"
+#define DBG_REG_A3 "a3"
+#define DBG_REG_A4 "a4"
+#define DBG_REG_A5 "a5"
+#define DBG_REG_A6 "a6"
+#define DBG_REG_A7 "a7"
+#define DBG_REG_S2 "s2"
+#define DBG_REG_S3 "s3"
+#define DBG_REG_S4 "s4"
+#define DBG_REG_S5 "s5"
+#define DBG_REG_S6 "s6"
+#define DBG_REG_S7 "s7"
+#define DBG_REG_S8 "s8"
+#define DBG_REG_S9 "s9"
+#define DBG_REG_S10 "s10"
+#define DBG_REG_S11 "s11"
+#define DBG_REG_T3 "t3"
+#define DBG_REG_T4 "t4"
+#define DBG_REG_T5 "t5"
+#define DBG_REG_T6 "t6"
+#define DBG_REG_EPC "pc"
+#define DBG_REG_STATUS "sstatus"
+#define DBG_REG_BADADDR "stval"
+#define DBG_REG_CAUSE "scause"
+
+#define DBG_REG_ZERO_OFF 0
+#define DBG_REG_RA_OFF 1
+#define DBG_REG_SP_OFF 2
+#define DBG_REG_GP_OFF 3
+#define DBG_REG_TP_OFF 4
+#define DBG_REG_T0_OFF 5
+#define DBG_REG_T1_OFF 6
+#define DBG_REG_T2_OFF 7
+#define DBG_REG_FP_OFF 8
+#define DBG_REG_S1_OFF 9
+#define DBG_REG_A0_OFF 10
+#define DBG_REG_A1_OFF 11
+#define DBG_REG_A2_OFF 12
+#define DBG_REG_A3_OFF 13
+#define DBG_REG_A4_OFF 14
+#define DBG_REG_A5_OFF 15
+#define DBG_REG_A6_OFF 16
+#define DBG_REG_A7_OFF 17
+#define DBG_REG_S2_OFF 18
+#define DBG_REG_S3_OFF 19
+#define DBG_REG_S4_OFF 20
+#define DBG_REG_S5_OFF 21
+#define DBG_REG_S6_OFF 22
+#define DBG_REG_S7_OFF 23
+#define DBG_REG_S8_OFF 24
+#define DBG_REG_S9_OFF 25
+#define DBG_REG_S10_OFF 26
+#define DBG_REG_S11_OFF 27
+#define DBG_REG_T3_OFF 28
+#define DBG_REG_T4_OFF 29
+#define DBG_REG_T5_OFF 30
+#define DBG_REG_T6_OFF 31
+#define DBG_REG_EPC_OFF 32
+#define DBG_REG_STATUS_OFF 33
+#define DBG_REG_BADADDR_OFF 34
+#define DBG_REG_CAUSE_OFF 35
+
+#include <asm/gdb_xml.h>
+
+#endif
+#endif
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index a2c809df2733..56053c9838b2 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -16,6 +16,8 @@
#ifndef CONFIG_MMU
#define pgprot_noncached(x) (x)
+#define pgprot_writecombine(x) (x)
+#define pgprot_device(x) (x)
#endif /* CONFIG_MMU */
/* Generic IO read/write. These perform native-endian accesses. */
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index bb4091ff4a21..0b2333e71fdc 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -9,6 +9,7 @@
*/
#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
#endif /* _ASM_RISCV_MMIOWB_H */
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index 46202dad365d..76aa96a9fc08 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -6,8 +6,6 @@
#include <asm-generic/module.h>
-#define MODULE_ARCH_VERMAGIC "riscv"
-
struct module;
unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
new file mode 100644
index 000000000000..f36368de839f
--- /dev/null
+++ b/arch/riscv/include/asm/parse_asm.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/bits.h>
+
+/* The bit field of immediate value in I-type instruction */
+#define I_IMM_SIGN_OPOFF 31
+#define I_IMM_11_0_OPOFF 20
+#define I_IMM_SIGN_OFF 12
+#define I_IMM_11_0_OFF 0
+#define I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define J_IMM_SIGN_OPOFF 31
+#define J_IMM_10_1_OPOFF 21
+#define J_IMM_11_OPOFF 20
+#define J_IMM_19_12_OPOFF 12
+#define J_IMM_SIGN_OFF 20
+#define J_IMM_10_1_OFF 1
+#define J_IMM_11_OFF 11
+#define J_IMM_19_12_OFF 12
+#define J_IMM_10_1_MASK GENMASK(9, 0)
+#define J_IMM_11_MASK GENMASK(0, 0)
+#define J_IMM_19_12_MASK GENMASK(7, 0)
+
+/* The bit field of immediate value in B-type instruction */
+#define B_IMM_SIGN_OPOFF 31
+#define B_IMM_10_5_OPOFF 25
+#define B_IMM_4_1_OPOFF 8
+#define B_IMM_11_OPOFF 7
+#define B_IMM_SIGN_OFF 12
+#define B_IMM_10_5_OFF 5
+#define B_IMM_4_1_OFF 1
+#define B_IMM_11_OFF 11
+#define B_IMM_10_5_MASK GENMASK(5, 0)
+#define B_IMM_4_1_MASK GENMASK(3, 0)
+#define B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+
+/* parts of opcode for RVG*/
+#define OPCODE_BRANCH 0x63
+#define OPCODE_JALR 0x67
+#define OPCODE_JAL 0x6f
+#define OPCODE_SYSTEM 0x73
+
+/* parts of opcode for RVC*/
+#define OPCODE_C_0 0x0
+#define OPCODE_C_1 0x1
+#define OPCODE_C_2 0x2
+
+/* parts of funct3 code for I, M, A extension*/
+#define FUNCT3_JALR 0x0
+#define FUNCT3_BEQ 0x0
+#define FUNCT3_BNE 0x1000
+#define FUNCT3_BLT 0x4000
+#define FUNCT3_BGE 0x5000
+#define FUNCT3_BLTU 0x6000
+#define FUNCT3_BGEU 0x7000
+
+/* parts of funct3 code for C extension*/
+#define FUNCT3_C_BEQZ 0xc000
+#define FUNCT3_C_BNEZ 0xe000
+#define FUNCT3_C_J 0xa000
+#define FUNCT3_C_JAL 0x2000
+#define FUNCT4_C_JR 0x8000
+#define FUNCT4_C_JALR 0xf000
+
+#define FUNCT12_SRET 0x10200000
+
+#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
+#define MATCH_JAL (OPCODE_JAL)
+#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
+#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
+#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
+#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
+#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
+#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
+#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
+#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
+#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
+#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
+#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
+#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
+#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
+
+#define MASK_JALR 0x707f
+#define MASK_JAL 0x7f
+#define MASK_C_JALR 0xf07f
+#define MASK_C_JR 0xf07f
+#define MASK_C_JAL 0xe003
+#define MASK_C_J 0xe003
+#define MASK_BEQ 0x707f
+#define MASK_BNE 0x707f
+#define MASK_BLT 0x707f
+#define MASK_BGE 0x707f
+#define MASK_BLTU 0x707f
+#define MASK_BGEU 0x707f
+#define MASK_C_BEQZ 0xe003
+#define MASK_C_BNEZ 0xe003
+#define MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
+
+/* Define a series of is_XXX_insn functions to check if the value INSN
+ * is an instance of instruction XXX.
+ */
+#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \
+static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+{ \
+ return (insn & (INSN_MASK)) == (INSN_MATCH); \
+}
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \
+ (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \
+ (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); })
+
+#define EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); })
+
+#define EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \
+ (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \
+ (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_J_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_B_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
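A sketch of how these macros compose; the DECLARE_INSN instantiations and the helper below are illustrative, with the intended in-tree user being the kgdb code added by this series:

#include <asm/parse_asm.h>

DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)

/* Illustrative: recover the (sign-extended) offset encoded in a jump insn. */
static long example_jump_offset(unsigned long insn)
{
	if (is_jal_insn(insn))
		return EXTRACT_JTYPE_IMM(insn);
	if (is_c_j_insn(insn))
		return EXTRACT_RVC_J_IMM(insn);
	return 0;
}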
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index b5918a6e0615..9a7d7346001e 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -6,7 +6,7 @@
#ifndef _ASM_RISCV_PATCH_H
#define _ASM_RISCV_PATCH_H
-int riscv_patch_text_nosync(void *addr, const void *insns, size_t len);
-int riscv_patch_text(void *addr, u32 insn);
+int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text(void *addr, u32 insn);
#endif /* _ASM_RISCV_PATCH_H */
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
index 0234048b12bc..062efd3a1d5d 100644
--- a/arch/riscv/include/asm/perf_event.h
+++ b/arch/riscv/include/asm/perf_event.h
@@ -12,19 +12,14 @@
#include <linux/ptrace.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_BASE_COUNTERS 2
/*
* The RISCV_MAX_COUNTERS parameter should be specified.
*/
-#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_MAX_COUNTERS 2
-#endif
-
-#ifndef RISCV_MAX_COUNTERS
-#error "Please provide a valid RISCV_MAX_COUNTERS for the PMU."
-#endif
/*
* These are the indexes of bits in counteren register *minus* 1,
@@ -82,6 +77,7 @@ struct riscv_pmu {
int irq;
};
+#endif
#ifdef CONFIG_PERF_EVENTS
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
#endif
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b15f70a1fdfa..f3b0da64c6c8 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -70,13 +70,6 @@ static inline struct page *pud_page(pud_t pud)
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9c188ad2e52d..eaea1f717010 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -173,16 +173,6 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/* Locate an entry in the page global directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
-{
- return mm->pgd + pgd_index(addr);
-}
-/* Locate an entry in the kernel page global directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
-
static inline struct page *pmd_page(pmd_t pmd)
{
return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
@@ -209,16 +199,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
-}
-
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) ((void)(pte))
-
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -470,11 +450,14 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#else /* CONFIG_MMU */
+#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
+#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
+#define VMALLOC_END TASK_SIZE
-#define TASK_SIZE 0xffffffffUL
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
#endif /* !CONFIG_MMU */
@@ -493,8 +476,6 @@ void paging_init(void);
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#include <asm-generic/pgtable.h>
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3ddb798264f1..bdddcd5c1b71 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,8 @@
#include <linux/const.h>
+#include <vdso/processor.h>
+
#include <asm/ptrace.h>
/*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)
extern unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
- barrier();
-}
-
static inline void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h
index e29af7191909..3c9ea6dd5af7 100644
--- a/arch/riscv/include/asm/ptdump.h
+++ b/arch/riscv/include/asm/ptdump.h
@@ -8,4 +8,15 @@
void ptdump_check_wx(void);
+#ifdef CONFIG_DEBUG_WX
+static inline void debug_checkwx(void)
+{
+ ptdump_check_wx();
+}
+#else
+static inline void debug_checkwx(void)
+{
+}
+#endif
+
#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index c38df4771c09..4c5bae7ca01c 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -22,14 +22,6 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_ro(void);
-void set_kernel_text_rw(void);
-#else
-static inline void set_kernel_text_ro(void) { }
-static inline void set_kernel_text_rw(void) { }
-#endif
-
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index f4c7cfda6b7f..40bb1c15a731 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h
index 7cec1968c8b4..136a442ef876 100644
--- a/arch/riscv/include/asm/soc.h
+++ b/arch/riscv/include/asm/soc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 Google, Inc
*/
#ifndef _ASM_RISCV_SOC_H
@@ -20,4 +21,42 @@ void soc_early_init(void);
extern unsigned long __soc_early_init_table_start;
extern unsigned long __soc_early_init_table_end;
+/*
+ * Allows Linux to provide a device tree, which is necessary for SOCs that
+ * don't provide a useful one on their own.
+ */
+struct soc_builtin_dtb {
+ unsigned long vendor_id;
+ unsigned long arch_id;
+ unsigned long imp_id;
+ void *(*dtb_func)(void);
+};
+
+/*
+ * The argument name must specify a valid DTS file name without the dts
+ * extension.
+ */
+#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \
+ extern void *__dtb_##name##_begin; \
+ \
+ static __init __used \
+ void *__soc_builtin_dtb_f__##name(void) \
+ { \
+ return (void *)&__dtb_##name##_begin; \
+ } \
+ \
+ static const struct soc_builtin_dtb __soc_builtin_dtb__##name \
+ __used __section(__soc_builtin_dtb_table) = \
+ { \
+ .vendor_id = vendor, \
+ .arch_id = arch, \
+ .imp_id = impl, \
+ .dtb_func = __soc_builtin_dtb_f__##name, \
+ }
+
+extern unsigned long __soc_builtin_dtb_table_start;
+extern unsigned long __soc_builtin_dtb_table_end;
+
+void *soc_lookup_builtin_dtb(void);
+
#endif
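A sketch of how a SoC would register a built-in DTB in this table; the ID macros are placeholders rather than the real K210 values, and the name argument ("k210") must match a DTS file built into the kernel, kendryte/k210.dts in this series:

#include <asm/soc.h>

/* Placeholder mvendorid/marchid/mimpid values; not a real SoC's IDs. */
#define EXAMPLE_SOC_VENDOR_ID	0x123
#define EXAMPLE_SOC_ARCH_ID	0x456
#define EXAMPLE_SOC_IMP_ID	0x789

SOC_BUILTIN_DTB_DECLARE(k210, EXAMPLE_SOC_VENDOR_ID,
			EXAMPLE_SOC_ARCH_ID, EXAMPLE_SOC_IMP_ID);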
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 7a7fce63c474..8454f746bbfd 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,8 +10,10 @@
#include <linux/types.h>
+#ifndef GENERIC_TIME_VSYSCALL
struct vdso_data {
};
+#endif
/*
* The VDSO symbols are mapped into Linux so we can just use regular symbol
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..df6ea65c1dec
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..c8e818688ec1
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+ * M-mode to obtain the value of CSR_TIME. Hence, unlike other
+ * architecture, no fence instructions surround the csr_read()
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..82a5693b1861
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..82fd5d83bd60
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/riscv/include/asm/vermagic.h b/arch/riscv/include/asm/vermagic.h
new file mode 100644
index 000000000000..7b9441a57466
--- /dev/null
+++ b/arch/riscv/include/asm/vermagic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "riscv"
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 86c83081044f..b355cf485671 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
obj-$(CONFIG_RISCV_SBI) += sbi.o
@@ -51,5 +51,6 @@ ifeq ($(CONFIG_RISCV_SBI), y)
obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
+obj-$(CONFIG_KGDB) += kgdb.o
clean:
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 4c90c07d8c39..bd0f122965c3 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -7,6 +7,23 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <asm/cacheinfo.h>
+
+static struct riscv_cacheinfo_ops *rv_cache_ops;
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
+{
+ rv_cache_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ if (rv_cache_ops && rv_cache_ops->get_priv_group)
+ return rv_cache_ops->get_priv_group(this_leaf);
+ return NULL;
+}
static void ci_leaf_init(struct cacheinfo *this_leaf,
struct device_node *node,
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 40a3c442ac5f..6d59e6906fdd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
return hart;
}
+/*
+ * Find hart ID of the CPU DT node under which given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the cpuid from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hartid(node);
+ }
+
+ return -1;
+}
+
#ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f, const char *isa)
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index c4c33bf02369..0ec22354018c 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -15,8 +15,8 @@
const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
-void *__cpu_up_stack_pointer[NR_CPUS];
-void *__cpu_up_task_pointer[NR_CPUS];
+void *__cpu_up_stack_pointer[NR_CPUS] __section(.data);
+void *__cpu_up_task_pointer[NR_CPUS] __section(.data);
extern const struct cpu_operations cpu_ops_sbi;
extern const struct cpu_operations cpu_ops_spinwait;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index a5ad00043104..ac202f44a670 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -6,6 +6,7 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/bitmap.h>
#include <linux/of.h>
#include <asm/processor.h>
#include <asm/hwcap.h>
@@ -13,15 +14,57 @@
#include <asm/switch_to.h>
unsigned long elf_hwcap __read_mostly;
+
+/* Host ISA bitmap */
+static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
+
#ifdef CONFIG_FPU
bool has_fpu __read_mostly;
#endif
+/**
+ * riscv_isa_extension_base() - Get base extension word
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * Return: base extension word as unsigned long value
+ *
+ * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
+ */
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
+{
+ if (!isa_bitmap)
+ return riscv_isa[0];
+ return isa_bitmap[0];
+}
+EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
+
+/**
+ * __riscv_isa_extension_available() - Check whether given extension
+ * is available or not
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * @bit: bit position of the desired extension
+ * Return: true or false
+ *
+ * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
+ */
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+{
+ const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
+
+ if (bit >= RISCV_ISA_EXT_MAX)
+ return false;
+
+ return test_bit(bit, bmap) ? true : false;
+}
+EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+
void riscv_fill_hwcap(void)
{
struct device_node *node;
const char *isa;
- size_t i;
+ char print_str[BITS_PER_LONG + 1];
+ size_t i, j, isa_len;
static unsigned long isa2hwcap[256] = {0};
isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
@@ -33,8 +76,11 @@ void riscv_fill_hwcap(void)
elf_hwcap = 0;
+ bitmap_zero(riscv_isa, RISCV_ISA_EXT_MAX);
+
for_each_of_cpu_node(node) {
unsigned long this_hwcap = 0;
+ unsigned long this_isa = 0;
if (riscv_of_processor_hartid(node) < 0)
continue;
@@ -44,8 +90,24 @@ void riscv_fill_hwcap(void)
continue;
}
- for (i = 0; i < strlen(isa); ++i)
+ i = 0;
+ isa_len = strlen(isa);
+#if IS_ENABLED(CONFIG_32BIT)
+ if (!strncmp(isa, "rv32", 4))
+ i += 4;
+#elif IS_ENABLED(CONFIG_64BIT)
+ if (!strncmp(isa, "rv64", 4))
+ i += 4;
+#endif
+ for (; i < isa_len; ++i) {
this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+ /*
+ * TODO: X, Y and Z extension parsing for Host ISA
+ * bitmap will be added in-future.
+ */
+ if ('a' <= isa[i] && isa[i] < 'x')
+ this_isa |= (1UL << (isa[i] - 'a'));
+ }
/*
* All "okay" hart should have same isa. Set HWCAP based on
@@ -56,6 +118,11 @@ void riscv_fill_hwcap(void)
elf_hwcap &= this_hwcap;
else
elf_hwcap = this_hwcap;
+
+ if (riscv_isa[0])
+ riscv_isa[0] &= this_isa;
+ else
+ riscv_isa[0] = this_isa;
}
/* We don't support systems with F but without D, so mask those out
@@ -65,7 +132,17 @@ void riscv_fill_hwcap(void)
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
}
- pr_info("elf_hwcap is 0x%lx\n", elf_hwcap);
+ memset(print_str, 0, sizeof(print_str));
+ for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+ if (riscv_isa[0] & BIT_MASK(i))
+ print_str[j++] = (char)('a' + i);
+ pr_info("riscv: ISA extensions %s\n", print_str);
+
+ memset(print_str, 0, sizeof(print_str));
+ for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+ if (elf_hwcap & BIT_MASK(i))
+ print_str[j++] = (char)('a' + i);
+ pr_info("riscv: ELF capabilities %s\n", print_str);
#ifdef CONFIG_FPU
if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 56d071b2c0a1..cae7e6d4c7ef 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -106,7 +106,9 @@ _save_context:
/* Handle interrupts */
move a0, sp /* pt_regs */
- tail do_IRQ
+ la a1, handle_arch_irq
+ REG_L a1, (a1)
+ jr a1
1:
/*
* Exceptions run with interrupts enabled or disabled depending on the
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index ce69b34ff55d..2ff63d0cbb50 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -7,10 +7,23 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+{
+ mutex_unlock(&text_mutex);
+ return 0;
+}
+
static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
@@ -25,7 +38,8 @@ static int ftrace_check_current_call(unsigned long hook_pos,
* Read the text we want to modify;
* return must be -EFAULT on read error
*/
- if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
+ if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
+ MCOUNT_INSN_SIZE))
return -EFAULT;
/*
@@ -51,7 +65,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
make_call(hook_pos, target, call);
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
- if (riscv_patch_text_nosync
+ if (patch_text_nosync
((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
return -EPERM;
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 98a406474e7d..7ed1b22950fd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -161,11 +161,20 @@ ENTRY(_start_kernel)
/* Reset all registers except ra, a0, a1 */
call reset_regs
- /* Setup a PMP to permit access to all of memory. */
+ /*
+ * Setup a PMP to permit access to all of memory. Some machines may
+ * not implement PMPs, so we set up a quick trap handler to just skip
+ * touching the PMPs on any trap.
+ */
+ la a0, pmp_done
+ csrw CSR_TVEC, a0
+
li a0, -1
csrw CSR_PMPADDR0, a0
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 345c4f2eba13..7207fa08d78f 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <asm/smp.h>
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
- switch (regs->cause & ~CAUSE_IRQ_FLAG) {
- case RV_IRQ_TIMER:
- riscv_timer_interrupt();
- break;
-#ifdef CONFIG_SMP
- case RV_IRQ_SOFT:
- /*
- * We only use software interrupts to pass IPIs, so if a non-SMP
- * system gets one, then we don't know what to do.
- */
- riscv_software_interrupt();
- break;
-#endif
- case RV_IRQ_EXT:
- handle_arch_irq(regs);
- break;
- default:
- pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
- BUG();
- }
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
void __init init_IRQ(void)
{
irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
}
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
new file mode 100644
index 000000000000..c3275f42d1ac
--- /dev/null
+++ b/arch/riscv/kernel/kgdb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kdebug.h>
+#include <linux/bug.h>
+#include <linux/kgdb.h>
+#include <linux/irqflags.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
+
+enum {
+ NOT_KGDB_BREAK = 0,
+ KGDB_SW_BREAK,
+ KGDB_COMPILED_BREAK,
+ KGDB_SW_SINGLE_STEP
+};
+
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+int decode_register_index(unsigned long opcode, int offset)
+{
+ return (opcode >> offset) & 0x1F;
+}
+
+int decode_register_index_short(unsigned long opcode, int offset)
+{
+ return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the new address for after a step */
+int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ unsigned long pc = regs->epc;
+ unsigned long *regs_ptr = (unsigned long *)regs;
+ unsigned int rs1_num, rs2_num;
+ int op_code;
+
+ if (get_kernel_nofault(op_code, (void *)pc))
+ return -EINVAL;
+ if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+ if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+ *next_addr = regs_ptr[rs1_num];
+ } else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+ *next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+ } else if (is_c_beqz_insn(op_code)) {
+ rs1_num = decode_register_index_short(op_code,
+ RVC_C1_RS1_OPOFF);
+ if (!rs1_num || regs_ptr[rs1_num] == 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else if (is_c_bnez_insn(op_code)) {
+ rs1_num =
+ decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+ if (rs1_num && regs_ptr[rs1_num] != 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else {
+ *next_addr = pc + 2;
+ }
+ } else {
+ if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+ bool result = false;
+ long imm = EXTRACT_BTYPE_IMM(op_code);
+ unsigned long rs1_val = 0, rs2_val = 0;
+
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+ if (rs1_num)
+ rs1_val = regs_ptr[rs1_num];
+ if (rs2_num)
+ rs2_val = regs_ptr[rs2_num];
+
+ if (is_beq_insn(op_code))
+ result = (rs1_val == rs2_val) ? true : false;
+ else if (is_bne_insn(op_code))
+ result = (rs1_val != rs2_val) ? true : false;
+ else if (is_blt_insn(op_code))
+ result =
+ ((long)rs1_val <
+ (long)rs2_val) ? true : false;
+ else if (is_bge_insn(op_code))
+ result =
+ ((long)rs1_val >=
+ (long)rs2_val) ? true : false;
+ else if (is_bltu_insn(op_code))
+ result = (rs1_val < rs2_val) ? true : false;
+ else if (is_bgeu_insn(op_code))
+ result = (rs1_val >= rs2_val) ? true : false;
+ if (result)
+ *next_addr = imm + pc;
+ else
+ *next_addr = pc + 4;
+ } else if (is_jal_insn(op_code)) {
+ *next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+ } else if (is_jalr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ if (rs1_num)
+ *next_addr = ((unsigned long *)regs)[rs1_num];
+ *next_addr += EXTRACT_ITYPE_IMM(op_code);
+ } else if (is_sret_insn(op_code)) {
+ *next_addr = pc;
+ } else {
+ *next_addr = pc + 4;
+ }
+ }
+ return 0;
+}
+
+int do_single_step(struct pt_regs *regs)
+{
+ /* Determine where the target instruction will send us to */
+ unsigned long addr = 0;
+ int error = get_step_address(regs, &addr);
+
+ if (error)
+ return error;
+
+	/* Save the opcode found at the address we will step to */
+ error = get_kernel_nofault(stepped_opcode, (void *)addr);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the op code with the break instruction */
+ error = copy_to_kernel_nofault((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ /* Flush and return */
+ if (!error) {
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ } else {
+ stepped_address = 0;
+ stepped_opcode = 0;
+ }
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode != 0) {
+ copy_to_kernel_nofault((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address,
+ stepped_address + BREAK_INSTR_SIZE);
+ }
+ stepped_address = 0;
+ stepped_opcode = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ {DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
+ {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
+ {DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+ {DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+ {DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+ {DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
+ {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
+ {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
+ {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
+	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
+ {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
+ {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+ {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
+ {DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
+ {DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
+ {DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
+ {DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
+ {DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
+ {DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
+ {DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
+ {DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
+ {DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
+ {DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
+ {DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
+ {DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
+ {DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
+ {DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
+ {DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
+ {DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
+ {DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
+ {DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
+ {DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
+ {DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
+ {DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
+ {DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
+ {DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
+ gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
+ gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
+ gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
+ gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
+ gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
+ gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
+ gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
+ gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
+ gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
+	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
+	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
+	gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
+ gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->epc = pc;
+}
+
+void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer)
+{
+ if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
+ sizeof(gdb_xfer_read_target)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+ else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
+ sizeof(gdb_xfer_read_cpuxml)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+}
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->epc = addr;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int err = 0;
+
+ undo_single_step(regs);
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ case 'D':
+ case 'k':
+ if (remcom_in_buffer[0] == 'c')
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ err = do_single_step(regs);
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+int kgdb_riscv_kgdbbreak(unsigned long addr)
+{
+ if (stepped_address == addr)
+ return KGDB_SW_SINGLE_STEP;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (addr == (unsigned long)&kgdb_compiled_break)
+ return KGDB_COMPILED_BREAK;
+
+ return kgdb_has_hit_break(addr);
+}
+
+static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long flags;
+ int type;
+
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ type = kgdb_riscv_kgdbbreak(regs->epc);
+ if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
+ return NOTIFY_DONE;
+
+ local_irq_save(flags);
+
+ if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+ args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (type == KGDB_COMPILED_BREAK)
+ regs->epc += 4;
+
+ local_irq_restore(flags);
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_riscv_notify,
+};
+
+int kgdb_arch_init(void)
+{
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Global data
+ */
+#ifdef CONFIG_RISCV_ISA_C
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x90}, /* c.ebreak */
+};
+#else
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00}, /* ebreak */
+};
+#endif
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 8bbe5dbe1341..7191342c54da 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -10,7 +10,7 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8a4fc65ee022..3fe7a5296aa5 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,22 +5,22 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/patch.h>
-struct riscv_insn_patch {
+struct patch_insn {
void *addr;
u32 insn;
atomic_t cpu_count;
};
#ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static void __kprobes *patch_map(void *addr, int fixmap)
+static void *patch_map(void *addr, int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
@@ -37,65 +37,72 @@ static void __kprobes *patch_map(void *addr, int fixmap)
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
+NOKPROBE_SYMBOL(patch_map);
-static void __kprobes patch_unmap(int fixmap)
+static void patch_unmap(int fixmap)
{
clear_fixmap(fixmap);
}
+NOKPROBE_SYMBOL(patch_unmap);
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
- unsigned long flags = 0;
int ret;
- raw_spin_lock_irqsave(&patch_lock, flags);
+ /*
+	 * The caller is expected to already hold text_mutex, so no extra
+	 * locking is needed here; this also keeps the patching safe across
+	 * cores.
+ */
+ lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
- ret = probe_kernel_write(waddr, insn, len);
+ ret = copy_to_kernel_nofault(waddr, insn, len);
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
return ret;
}
+NOKPROBE_SYMBOL(patch_insn_write);
#else
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
- return probe_kernel_write(addr, insn, len);
+ return copy_to_kernel_nofault(addr, insn, len);
}
+NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */
-int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
+int patch_text_nosync(void *addr, const void *insns, size_t len)
{
u32 *tp = addr;
int ret;
- ret = riscv_insn_write(tp, insns, len);
+ ret = patch_insn_write(tp, insns, len);
if (!ret)
flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
return ret;
}
+NOKPROBE_SYMBOL(patch_text_nosync);
-static int __kprobes riscv_patch_text_cb(void *data)
+static int patch_text_cb(void *data)
{
- struct riscv_insn_patch *patch = data;
+ struct patch_insn *patch = data;
int ret = 0;
if (atomic_inc_return(&patch->cpu_count) == 1) {
ret =
- riscv_patch_text_nosync(patch->addr, &patch->insn,
+ patch_text_nosync(patch->addr, &patch->insn,
GET_INSN_LENGTH(patch->insn));
atomic_inc(&patch->cpu_count);
} else {
@@ -106,15 +113,17 @@ static int __kprobes riscv_patch_text_cb(void *data)
return ret;
}
+NOKPROBE_SYMBOL(patch_text_cb);
-int __kprobes riscv_patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 insn)
{
- struct riscv_insn_patch patch = {
+ struct patch_insn patch = {
.addr = addr,
.insn = insn,
.cpu_count = ATOMIC_INIT(0),
};
- return stop_machine_cpuslocked(riscv_patch_text_cb,
+ return stop_machine_cpuslocked(patch_text_cb,
&patch, cpu_online_mask);
}
+NOKPROBE_SYMBOL(patch_text);
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index 91626d9ae5f2..c835f0362d94 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -147,7 +147,7 @@ static int riscv_map_hw_event(u64 config)
return riscv_pmu->hw_events[config];
}
-int riscv_map_cache_decode(u64 config, unsigned int *type,
+static int riscv_map_cache_decode(u64 config, unsigned int *type,
unsigned int *op, unsigned int *result)
{
return -ENOENT;
@@ -342,7 +342,7 @@ static void riscv_pmu_del(struct perf_event *event, int flags)
static DEFINE_MUTEX(pmc_reserve_mutex);
-irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
{
return IRQ_NONE;
}
@@ -361,7 +361,7 @@ static int reserve_pmc_hardware(void)
return err;
}
-void release_pmc_hardware(void)
+static void release_pmc_hardware(void)
{
mutex_lock(&pmc_reserve_mutex);
if (riscv_pmu->irq >= 0)
@@ -464,7 +464,7 @@ static const struct of_device_id riscv_pmu_of_ids[] = {
{ /* sentinel value */ }
};
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
{
struct device_node *node = of_find_node_by_type(NULL, "pmu");
const struct of_device_id *of_id;
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 610c11e91606..824d117cf202 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -22,7 +22,7 @@
#include <asm/switch_to.h>
#include <asm/thread_info.h>
-unsigned long gp_in_global __asm__("gp");
+register unsigned long gp_in_global __asm__("gp");
extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 7c24da59bccf..f383ef5672b2 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -102,7 +102,7 @@ void sbi_shutdown(void)
{
sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
-EXPORT_SYMBOL(sbi_set_timer);
+EXPORT_SYMBOL(sbi_shutdown);
/**
* sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
@@ -113,7 +113,7 @@ void sbi_clear_ipi(void)
{
sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
}
-EXPORT_SYMBOL(sbi_shutdown);
+EXPORT_SYMBOL(sbi_clear_ipi);
/**
* sbi_set_timer_v01() - Program the timer for next timer event.
@@ -167,6 +167,11 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
return result;
}
+
+static void sbi_set_power_off(void)
+{
+ pm_power_off = sbi_shutdown;
+}
#else
static void __sbi_set_timer_v01(uint64_t stime_value)
{
@@ -191,6 +196,8 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
return 0;
}
+
+static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */
static void __sbi_set_timer_v02(uint64_t stime_value)
@@ -540,16 +547,12 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
-static void sbi_power_off(void)
-{
- sbi_shutdown();
-}
int __init sbi_init(void)
{
int ret;
- pm_power_off = sbi_power_off;
+ sbi_set_power_off();
ret = sbi_get_spec_version();
if (ret > 0)
sbi_spec_version = ret;
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 145128a7e560..f04373be54a6 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -22,7 +22,6 @@
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
@@ -75,7 +74,11 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem();
paging_init();
+#if IS_ENABLED(CONFIG_BUILTIN_DTB)
+ unflatten_and_copy_device_tree();
+#else
unflatten_device_tree();
+#endif
clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index e0a6293093f1..b1d4f452f843 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -10,6 +10,7 @@
#include <linux/cpu.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
@@ -63,6 +64,7 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
for_each_cpu(cpu, in)
cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
+EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
@@ -121,11 +123,14 @@ static inline void clear_ipi(void)
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
{
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
+ irq_enter();
+
clear_ipi();
while (true) {
@@ -136,7 +141,7 @@ void riscv_software_interrupt(void)
ops = xchg(pending_ipis, 0);
if (ops == 0)
- return;
+ goto done;
if (ops & (1 << IPI_RESCHEDULE)) {
stats[IPI_RESCHEDULE]++;
@@ -158,6 +163,10 @@ void riscv_software_interrupt(void)
/* Order data access and bit testing. */
mb();
}
+
+done:
+ irq_exit();
+ set_irq_regs(old_regs);
}
static const char * const ipi_names[] = {
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index 0b3b3dc9ad0f..c7b0a73e382e 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -4,7 +4,7 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/soc.h>
/*
@@ -26,3 +26,30 @@ void __init soc_early_init(void)
}
}
}
+
+static bool soc_builtin_dtb_match(unsigned long vendor_id,
+ unsigned long arch_id, unsigned long imp_id,
+ const struct soc_builtin_dtb *entry)
+{
+ return entry->vendor_id == vendor_id &&
+ entry->arch_id == arch_id &&
+ entry->imp_id == imp_id;
+}
+
+void * __init soc_lookup_builtin_dtb(void)
+{
+ unsigned long vendor_id, arch_id, imp_id;
+ const struct soc_builtin_dtb *s;
+
+ __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
+ __asm__ ("csrr %0, marchid" : "=r"(arch_id));
+ __asm__ ("csrr %0, mimpid" : "=r"(imp_id));
+
+ for (s = (void *)&__soc_builtin_dtb_table_start;
+ (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
+ if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
+ return s->dtb_func();
+ }
+
+ return NULL;
+}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 02087fe539c6..595342910c3f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -12,6 +12,8 @@
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
+register unsigned long sp_in_global __asm__("sp");
+
#ifdef CONFIG_FRAME_POINTER
struct stackframe {
@@ -19,8 +21,6 @@ struct stackframe {
unsigned long ra;
};
-register unsigned long sp_in_global __asm__("sp");
-
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(unsigned long, void *), void *arg)
{
@@ -65,7 +65,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
#else /* !CONFIG_FRAME_POINTER */
-static void notrace walk_stackframe(struct task_struct *task,
+void notrace walk_stackframe(struct task_struct *task,
struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
unsigned long sp, pc;
@@ -99,17 +99,18 @@ static void notrace walk_stackframe(struct task_struct *task,
static bool print_trace_address(unsigned long pc, void *arg)
{
- print_ip_sym(pc);
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
return false;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
-
static bool save_wchan(unsigned long pc, void *arg)
{
if (!in_sched_functions(pc)) {
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f3619f59d85c..12f8a7fce78b 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
{
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
+
+ if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+ if (unlikely(!(prot & PROT_READ)))
+ return -EINVAL;
+
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 6a53c02e9c73..4d3a1048ad8b 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,3 +26,12 @@ void __init time_init(void)
lpj_fine = riscv_timebase / HZ;
timer_probe();
}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7f58fa53033f..7d95cce5e47c 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -137,7 +137,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
{
bug_insn_t insn;
- if (probe_kernel_address((bug_insn_t *)pc, insn))
+ if (get_kernel_nofault(insn, (bug_insn_t *)pc))
return 0;
return GET_INSN_LENGTH(insn);
@@ -147,6 +147,11 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs))
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+#ifdef CONFIG_KGDB
+ else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif
else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
regs->epc += get_break_insn_length(regs->epc);
else
@@ -160,7 +165,7 @@ int is_valid_bugaddr(unsigned long pc)
if (pc < VMALLOC_START)
return 0;
- if (probe_kernel_address((bug_insn_t *)pc, insn))
+ if (get_kernel_nofault(insn, (bug_insn_t *)pc))
return 0;
if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
return (insn == __BUG_INSN_32);
@@ -178,6 +183,4 @@ void trap_init(void)
csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
- /* Enable interrupts */
- csr_write(CSR_IE, IE_SIE);
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 484d95a70907..678204231700 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -11,8 +11,12 @@
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
+#endif
extern char vdso_start[], vdso_end[];
@@ -26,7 +30,7 @@ static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
static int __init vdso_init(void)
{
@@ -61,7 +65,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
@@ -75,15 +79,24 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_len,
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret))
+ if (unlikely(ret)) {
mm->context.vdso = NULL;
+ goto end;
+ }
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
end:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
return NULL;
}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 33b16f4212f7..38ba55b0eb9d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -1,18 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copied from arch/tile/kernel/vdso/Makefile
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
# Symbols present in the vdso
vdso-syms = rt_sigreturn
ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache
# Files to link into the vdso
-obj-vdso = $(patsubst %, %.o, $(vdso-syms))
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
@@ -33,15 +39,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
$(call if_changed,ld)
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644
index 91378a52eb22..000000000000
--- a/arch/riscv/kernel/vdso/clock_getres.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_getres
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644
index 5371fd9bc01f..000000000000
--- a/arch/riscv/kernel/vdso/clock_gettime.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_gettime
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e6fb8af88632..000000000000
--- a/arch/riscv/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_gettimeofday
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_gettimeofday)
diff --git a/arch/riscv/kernel/vdso/note.S b/arch/riscv/kernel/vdso/note.S
new file mode 100644
index 000000000000..2a956c942211
--- /dev/null
+++ b/arch/riscv/kernel/vdso/note.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..e6f558bca71b 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2012 Regents of the University of California
*/
+#include <asm/page.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..d264943e2e47
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 0339b6bbe11a..e6f8016b366a 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -34,6 +34,11 @@ SECTIONS
KEEP(*(__soc_early_init_table))
__soc_early_init_table_end = .;
}
+ __soc_builtin_dtb_table : {
+ __soc_builtin_dtb_table_start = .;
+ KEEP(*(__soc_builtin_dtb_table))
+ __soc_builtin_dtb_table_end = .;
+ }
/* we have to discard exit text and such at runtime, not link time */
.exit.text :
{
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 8930ab7278e6..094118663285 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 SiFive
*/
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index be84e32adc4c..ae7b7fe24658 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (unlikely(!vma))
goto bad_area;
@@ -114,7 +114,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs))
@@ -147,7 +147,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -155,7 +155,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -163,7 +163,7 @@ good_area:
* Fix it, but check if it's kernel or user first.
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -191,14 +191,14 @@ no_context:
* (which will retry the fault, or kill us if we got oom-killed).
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index a6189ed36c5f..932dadfdca54 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -12,29 +12,21 @@ int pmd_huge(pmd_t pmd)
return pmd_leaf(pmd);
}
-static __init int setup_hugepagesz(char *opt)
+bool __init arch_hugetlb_valid_size(unsigned long size)
{
- unsigned long ps = memparse(opt, &opt);
-
- if (ps == HPAGE_SIZE) {
- hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
- } else if (IS_ENABLED(CONFIG_64BIT) && ps == PUD_SIZE) {
- hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
- } else {
- hugetlb_bad_size();
- pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
- return 0;
- }
-
- return 1;
+ if (size == HPAGE_SIZE)
+ return true;
+ else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
+ return true;
+ else
+ return false;
}
-__setup("hugepagesz=", setup_hugepagesz);
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
/* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
- if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT))
+ if (IS_ENABLED(CONFIG_64BIT))
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
return 0;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b55be44ff9bd..f4adb3684f3d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -17,8 +17,9 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
+#include <asm/soc.h>
#include <asm/io.h>
+#include <asm/ptdump.h>
#include "../kernel/head.h"
@@ -39,7 +40,7 @@ static void __init zone_sizes_init(void)
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
}
static void setup_zero_page(void)
@@ -47,7 +48,7 @@ static void setup_zero_page(void)
memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
-#ifdef CONFIG_DEBUG_VM
+#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
@@ -150,7 +151,8 @@ void __init setup_bootmem(void)
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = max_pfn;
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
@@ -233,12 +235,12 @@ static void __init create_pte_mapping(pte_t *ptep,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
- uintptr_t pte_index = pte_index(va);
+ uintptr_t pte_idx = pte_index(va);
BUG_ON(sz != PAGE_SIZE);
- if (pte_none(ptep[pte_index]))
- ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
+ if (pte_none(ptep[pte_idx]))
+ ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
#ifndef __PAGETABLE_PMD_FOLDED
@@ -281,21 +283,21 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
{
pte_t *ptep;
phys_addr_t pte_phys;
- uintptr_t pmd_index = pmd_index(va);
+ uintptr_t pmd_idx = pmd_index(va);
if (sz == PMD_SIZE) {
- if (pmd_none(pmdp[pmd_index]))
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
+ if (pmd_none(pmdp[pmd_idx]))
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
return;
}
- if (pmd_none(pmdp[pmd_index])) {
+ if (pmd_none(pmdp[pmd_idx])) {
pte_phys = alloc_pte(va);
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
ptep = get_pte_virt(pte_phys);
memset(ptep, 0, PAGE_SIZE);
} else {
- pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
+ pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
ptep = get_pte_virt(pte_phys);
}
@@ -323,21 +325,21 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
{
pgd_next_t *nextp;
phys_addr_t next_phys;
- uintptr_t pgd_index = pgd_index(va);
+ uintptr_t pgd_idx = pgd_index(va);
if (sz == PGDIR_SIZE) {
- if (pgd_val(pgdp[pgd_index]) == 0)
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
+ if (pgd_val(pgdp[pgd_idx]) == 0)
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
return;
}
- if (pgd_val(pgdp[pgd_index]) == 0) {
+ if (pgd_val(pgdp[pgd_idx]) == 0) {
next_phys = alloc_pgd_next(va);
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = get_pgd_next_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
- next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
+ next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
nextp = get_pgd_next_virt(next_phys);
}
@@ -478,21 +480,18 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
-
-void free_initmem(void)
-{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
- /* Make the region as non-execuatble. */
- set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
+#ifdef CONFIG_BUILTIN_DTB
+ dtb_early_va = soc_lookup_builtin_dtb();
+ if (!dtb_early_va) {
+ /* Fallback to first available DTS */
+ dtb_early_va = (void *) __dtb_start;
+ }
+#else
dtb_early_va = (void *)dtb_pa;
+#endif
}
static inline void setup_vm_final(void)
@@ -501,22 +500,6 @@ static inline void setup_vm_final(void)
#endif /* CONFIG_MMU */
#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void)
-{
- unsigned long text_start = (unsigned long)_text;
- unsigned long text_end = (unsigned long)_etext;
-
- set_memory_rw(text_start, (text_end - text_start) >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
- unsigned long text_start = (unsigned long)_text;
- unsigned long text_end = (unsigned long)_etext;
-
- set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
-}
-
void mark_rodata_ro(void)
{
unsigned long text_start = (unsigned long)_text;
@@ -529,6 +512,8 @@ void mark_rodata_ro(void)
set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+
+ debug_checkwx();
}
#endif
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index ec0ca90dd900..4a8b61806633 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -6,8 +6,8 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/fixmap.h>
extern pgd_t early_pg_dir[PTRS_PER_PGD];
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 728759eb530a..289a9a5ea5b5 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -4,7 +4,7 @@
*/
#include <linux/pagewalk.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
- down_read(&init_mm.mmap_sem);
+ mmap_read_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
- up_read(&init_mm.mmap_sem);
+ mmap_read_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
@@ -151,6 +151,7 @@ int set_memory_nx(unsigned long addr, int numpages)
int set_direct_map_invalid_noflush(struct page *page)
{
+ int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
struct pageattr_masks masks = {
@@ -158,11 +159,16 @@ int set_direct_map_invalid_noflush(struct page *page)
.clear_mask = __pgprot(_PAGE_PRESENT)
};
- return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_unlock(&init_mm);
+
+ return ret;
}
int set_direct_map_default_noflush(struct page *page)
{
+ int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
struct pageattr_masks masks = {
@@ -170,7 +176,11 @@ int set_direct_map_default_noflush(struct page *page)
.clear_mask = __pgprot(0)
};
- return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_unlock(&init_mm);
+
+ return ret;
}
void __kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 7eab76a93106..0831c2e61a8f 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -9,7 +9,7 @@
#include <linux/ptdump.h>
#include <asm/ptdump.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/kasan.h>
#define pt_dump_seq_printf(m, fmt, args...) \
@@ -204,7 +204,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
}
static void note_page(struct ptdump_state *pt_st, unsigned long addr,
- int level, unsigned long val)
+ int level, u64 val)
{
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 302934177760..b198eaa74456 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -13,8 +13,35 @@
#include <linux/filter.h>
#include "bpf_jit.h"
+/*
+ * Stack layout during BPF program execution:
+ *
+ * high
+ * RV32 fp => +----------+
+ * | saved ra |
+ * | saved fp | RV32 callee-saved registers
+ * | ... |
+ * +----------+ <= (fp - 4 * NR_SAVED_REGISTERS)
+ * | hi(R6) |
+ * | lo(R6) |
+ * | hi(R7) | JIT scratch space for BPF registers
+ * | lo(R7) |
+ * | ... |
+ * BPF_REG_FP => +----------+ <= (fp - 4 * NR_SAVED_REGISTERS
+ * | | - 4 * BPF_JIT_SCRATCH_REGS)
+ * | |
+ * | ... | BPF program stack
+ * | |
+ * RV32 sp => +----------+
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +----------+
+ * low
+ */
+
enum {
- /* Stack layout - these are offsets from (top of stack - 4). */
+ /* Stack layout - these are offsets from top of JIT scratch space. */
BPF_R6_HI,
BPF_R6_LO,
BPF_R7_HI,
@@ -29,7 +56,11 @@ enum {
BPF_JIT_SCRATCH_REGS,
};
-#define STACK_OFFSET(k) (-4 - ((k) * 4))
+/* Number of callee-saved registers stored to stack: ra, fp, s1--s7. */
+#define NR_SAVED_REGISTERS 9
+
+/* Offset from fp for BPF registers stored on stack. */
+#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
@@ -111,11 +142,9 @@ static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
- int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 4;
+ int stack_adjust = ctx->stack_size;
const s8 *r0 = bpf2rv32[BPF_REG_0];
- store_offset -= 4 * BPF_JIT_SCRATCH_REGS;
-
/* Set return value if not tail call. */
if (!is_tail_call) {
emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);
@@ -123,15 +152,15 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
}
/* Restore callee-saved registers. */
- emit(rv_lw(RV_REG_RA, store_offset - 0, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_FP, store_offset - 4, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S1, store_offset - 8, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S2, store_offset - 12, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S3, store_offset - 16, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S4, store_offset - 20, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S5, store_offset - 24, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S6, store_offset - 28, RV_REG_SP), ctx);
- emit(rv_lw(RV_REG_S7, store_offset - 32, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_RA, stack_adjust - 4, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_FP, stack_adjust - 8, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S1, stack_adjust - 12, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S2, stack_adjust - 16, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S3, stack_adjust - 20, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S4, stack_adjust - 24, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S5, stack_adjust - 28, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S6, stack_adjust - 32, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S7, stack_adjust - 36, RV_REG_SP), ctx);
emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
@@ -770,12 +799,13 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
/*
- * if ((temp_tcc = tcc - 1) < 0)
+ * temp_tcc = tcc - 1;
+ * if (tcc < 0)
* goto out;
*/
emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
- emit_bcc(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx);
+ emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
/*
* prog = array->ptrs[index];
@@ -1259,17 +1289,20 @@ notsupported:
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
- /* Make space to save 9 registers: ra, fp, s1--s7. */
- int stack_adjust = 9 * sizeof(u32), store_offset, bpf_stack_adjust;
const s8 *fp = bpf2rv32[BPF_REG_FP];
const s8 *r1 = bpf2rv32[BPF_REG_1];
-
- bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ int stack_adjust = 0;
+ int bpf_stack_adjust =
+ round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
+
+ /* Make space for callee-saved registers. */
+ stack_adjust += NR_SAVED_REGISTERS * sizeof(u32);
+ /* Make space for BPF registers on stack. */
+ stack_adjust += BPF_JIT_SCRATCH_REGS * sizeof(u32);
+ /* Make space for BPF stack. */
stack_adjust += bpf_stack_adjust;
-
- store_offset = stack_adjust - 4;
-
- stack_adjust += 4 * BPF_JIT_SCRATCH_REGS;
+ /* Round up for stack alignment. */
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
/*
* The first instruction sets the tail-call-counter (TCC) register.
@@ -1280,24 +1313,24 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
/* Save callee-save registers. */
- emit(rv_sw(RV_REG_SP, store_offset - 0, RV_REG_RA), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 4, RV_REG_FP), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 8, RV_REG_S1), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 12, RV_REG_S2), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 16, RV_REG_S3), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 20, RV_REG_S4), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 24, RV_REG_S5), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 28, RV_REG_S6), ctx);
- emit(rv_sw(RV_REG_SP, store_offset - 32, RV_REG_S7), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 4, RV_REG_RA), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 8, RV_REG_FP), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 12, RV_REG_S1), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 16, RV_REG_S2), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 20, RV_REG_S3), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 24, RV_REG_S4), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 28, RV_REG_S5), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 32, RV_REG_S6), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 36, RV_REG_S7), ctx);
/* Set fp: used as the base address for stacked BPF registers. */
emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
- /* Set up BPF stack pointer. */
+ /* Set up BPF frame pointer. */
emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
- /* Set up context pointer. */
+ /* Set up BPF context pointer. */
emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index d208a9fd6c52..6cfd164cbe88 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -515,7 +515,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
- if (!is64)
+ if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
@@ -542,13 +542,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
- {
- int shift = 64 - imm;
-
- emit(rv_slli(rd, rd, shift), ctx);
- emit(rv_srli(rd, rd, shift), ctx);
+ switch (imm) {
+ case 16:
+ emit(rv_slli(rd, rd, 48), ctx);
+ emit(rv_srli(rd, rd, 48), ctx);
+ break;
+ case 32:
+ if (!aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case 64:
+ /* Do nothing */
+ break;
+ }
break;
- }
+
case BPF_ALU | BPF_END | BPF_FROM_BE:
emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
@@ -692,19 +700,19 @@ out_be:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
- if (!is64)
+ if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
- if (!is64)
+ if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
- if (!is64)
+ if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -784,11 +792,15 @@ out_be:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
- case BPF_JMP | BPF_JSET | BPF_K:
- case BPF_JMP32 | BPF_JSET | BPF_K:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
- emit_imm(RV_REG_T1, imm, ctx);
+ if (imm) {
+ emit_imm(RV_REG_T1, imm, ctx);
+ rs = RV_REG_T1;
+ } else {
+ /* If imm is 0, simply use zero register. */
+ rs = RV_REG_ZERO;
+ }
if (!is64) {
if (is_signed_bpf_cond(BPF_OP(code)))
emit_sext_32_rd(&rd, ctx);
@@ -799,16 +811,28 @@ out_be:
/* Adjust for extra insns */
rvoff -= (e - s) << 2;
+ emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
+ break;
- if (BPF_OP(code) == BPF_JSET) {
- /* Adjust for and */
- rvoff -= 4;
- emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
- emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
- ctx);
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ rvoff = rv_offset(i, off, ctx);
+ s = ctx->ninsns;
+ if (is_12b_int(imm)) {
+ emit(rv_andi(RV_REG_T1, rd, imm), ctx);
} else {
- emit_branch(BPF_OP(code), rd, RV_REG_T1, rvoff, ctx);
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
}
+ /* For jset32, we should clear the upper 32 bits of t1, but
+ * sign-extension is sufficient here and saves one instruction,
+ * as t1 is used only in comparison against zero.
+ */
+ if (!is64 && imm < 0)
+ emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
+ e = ctx->ninsns;
+ rvoff -= (e - s) << 2;
+ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
break;
/* function call */