Diffstat (limited to 'arch/loongarch')
-rw-r--r-- arch/loongarch/Kbuild | 1
-rw-r--r-- arch/loongarch/Kconfig | 73
-rw-r--r-- arch/loongarch/Kconfig.debug | 1
-rw-r--r-- arch/loongarch/Makefile | 18
-rw-r--r-- arch/loongarch/boot/dts/Makefile | 2
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k0500-ref.dts | 4
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k0500.dtsi | 86
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k1000-ref.dts | 13
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k1000.dtsi | 18
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k2000-ref.dts | 2
-rw-r--r-- arch/loongarch/boot/dts/loongson-2k2000.dtsi | 71
-rw-r--r-- arch/loongarch/configs/loongson3_defconfig | 143
-rw-r--r-- arch/loongarch/crypto/Kconfig | 9
-rw-r--r-- arch/loongarch/crypto/Makefile | 2
-rw-r--r-- arch/loongarch/crypto/crc32-loongarch.c | 300
-rw-r--r-- arch/loongarch/include/asm/Kbuild | 21
-rw-r--r-- arch/loongarch/include/asm/acpi.h | 1
-rw-r--r-- arch/loongarch/include/asm/addrspace.h | 4
-rw-r--r-- arch/loongarch/include/asm/asm-prototypes.h | 14
-rw-r--r-- arch/loongarch/include/asm/asmmacro.h | 1
-rw-r--r-- arch/loongarch/include/asm/atomic.h | 2
-rw-r--r-- arch/loongarch/include/asm/bootinfo.h | 4
-rw-r--r-- arch/loongarch/include/asm/bug.h | 13
-rw-r--r-- arch/loongarch/include/asm/cache.h | 2
-rw-r--r-- arch/loongarch/include/asm/cpu-features.h | 3
-rw-r--r-- arch/loongarch/include/asm/cpu-info.h | 22
-rw-r--r-- arch/loongarch/include/asm/cpu.h | 30
-rw-r--r-- arch/loongarch/include/asm/dma-direct.h | 11
-rw-r--r-- arch/loongarch/include/asm/fprobe.h | 12
-rw-r--r-- arch/loongarch/include/asm/fpu.h | 40
-rw-r--r-- arch/loongarch/include/asm/ftrace.h | 60
-rw-r--r-- arch/loongarch/include/asm/hardirq.h | 10
-rw-r--r-- arch/loongarch/include/asm/hugetlb.h | 25
-rw-r--r-- arch/loongarch/include/asm/hw_breakpoint.h | 8
-rw-r--r-- arch/loongarch/include/asm/hw_irq.h | 2
-rw-r--r-- arch/loongarch/include/asm/inst.h | 17
-rw-r--r-- arch/loongarch/include/asm/io.h | 28
-rw-r--r-- arch/loongarch/include/asm/irq.h | 45
-rw-r--r-- arch/loongarch/include/asm/irq_work.h | 10
-rw-r--r-- arch/loongarch/include/asm/jump_label.h | 16
-rw-r--r-- arch/loongarch/include/asm/kasan.h | 15
-rw-r--r-- arch/loongarch/include/asm/kfence.h | 7
-rw-r--r-- arch/loongarch/include/asm/kvm_csr.h | 6
-rw-r--r-- arch/loongarch/include/asm/kvm_eiointc.h | 123
-rw-r--r-- arch/loongarch/include/asm/kvm_host.h | 113
-rw-r--r-- arch/loongarch/include/asm/kvm_ipi.h | 45
-rw-r--r-- arch/loongarch/include/asm/kvm_para.h | 187
-rw-r--r-- arch/loongarch/include/asm/kvm_pch_pic.h | 62
-rw-r--r-- arch/loongarch/include/asm/kvm_vcpu.h | 31
-rw-r--r-- arch/loongarch/include/asm/lbt.h | 10
-rw-r--r-- arch/loongarch/include/asm/loongarch.h | 116
-rw-r--r-- arch/loongarch/include/asm/mmu_context.h | 35
-rw-r--r-- arch/loongarch/include/asm/mmzone.h | 16
-rw-r--r-- arch/loongarch/include/asm/numa.h | 1
-rw-r--r-- arch/loongarch/include/asm/page.h | 15
-rw-r--r-- arch/loongarch/include/asm/paravirt.h | 42
-rw-r--r-- arch/loongarch/include/asm/paravirt_api_clock.h | 1
-rw-r--r-- arch/loongarch/include/asm/percpu.h | 124
-rw-r--r-- arch/loongarch/include/asm/perf_event.h | 3
-rw-r--r-- arch/loongarch/include/asm/pgalloc.h | 20
-rw-r--r-- arch/loongarch/include/asm/pgtable-bits.h | 13
-rw-r--r-- arch/loongarch/include/asm/pgtable.h | 116
-rw-r--r-- arch/loongarch/include/asm/ptrace.h | 6
-rw-r--r-- arch/loongarch/include/asm/qspinlock.h | 41
-rw-r--r-- arch/loongarch/include/asm/set_memory.h | 22
-rw-r--r-- arch/loongarch/include/asm/setup.h | 5
-rw-r--r-- arch/loongarch/include/asm/smp.h | 34
-rw-r--r-- arch/loongarch/include/asm/stackframe.h | 13
-rw-r--r-- arch/loongarch/include/asm/stacktrace.h | 3
-rw-r--r-- arch/loongarch/include/asm/syscall.h | 15
-rw-r--r-- arch/loongarch/include/asm/thread_info.h | 8
-rw-r--r-- arch/loongarch/include/asm/topology.h | 1
-rw-r--r-- arch/loongarch/include/asm/unistd.h | 3
-rw-r--r-- arch/loongarch/include/asm/unwind_hints.h | 10
-rw-r--r-- arch/loongarch/include/asm/uprobes.h | 5
-rw-r--r-- arch/loongarch/include/asm/vdso.h | 1
-rw-r--r-- arch/loongarch/include/asm/vdso/arch_data.h | 25
-rw-r--r-- arch/loongarch/include/asm/vdso/getrandom.h | 33
-rw-r--r-- arch/loongarch/include/asm/vdso/gettimeofday.h | 14
-rw-r--r-- arch/loongarch/include/asm/vdso/vdso.h | 58
-rw-r--r-- arch/loongarch/include/asm/vdso/vsyscall.h | 13
-rw-r--r-- arch/loongarch/include/asm/video.h (renamed from arch/loongarch/include/asm/fb.h) | 8
-rw-r--r-- arch/loongarch/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/loongarch/include/uapi/asm/hwcap.h | 1
-rw-r--r-- arch/loongarch/include/uapi/asm/kvm.h | 48
-rw-r--r-- arch/loongarch/include/uapi/asm/kvm_para.h | 22
-rw-r--r-- arch/loongarch/include/uapi/asm/ptrace.h | 10
-rw-r--r-- arch/loongarch/include/uapi/asm/sigcontext.h | 1
-rw-r--r-- arch/loongarch/include/uapi/asm/unistd.h | 4
-rw-r--r-- arch/loongarch/kernel/Makefile | 13
-rw-r--r-- arch/loongarch/kernel/Makefile.syscalls | 4
-rw-r--r-- arch/loongarch/kernel/acpi.c | 89
-rw-r--r-- arch/loongarch/kernel/asm-offsets.c | 22
-rw-r--r-- arch/loongarch/kernel/cacheinfo.c | 6
-rw-r--r-- arch/loongarch/kernel/cpu-probe.c | 120
-rw-r--r-- arch/loongarch/kernel/dma.c | 9
-rw-r--r-- arch/loongarch/kernel/efi-header.S | 4
-rw-r--r-- arch/loongarch/kernel/efi.c | 8
-rw-r--r-- arch/loongarch/kernel/entry.S | 22
-rw-r--r-- arch/loongarch/kernel/env.c | 2
-rw-r--r-- arch/loongarch/kernel/fpu.S | 10
-rw-r--r-- arch/loongarch/kernel/ftrace_dyn.c | 24
-rw-r--r-- arch/loongarch/kernel/genex.S | 33
-rw-r--r-- arch/loongarch/kernel/head.S | 15
-rw-r--r-- arch/loongarch/kernel/hw_breakpoint.c | 112
-rw-r--r-- arch/loongarch/kernel/idle.c | 3
-rw-r--r-- arch/loongarch/kernel/inst.c | 2
-rw-r--r-- arch/loongarch/kernel/io.c | 94
-rw-r--r-- arch/loongarch/kernel/irq.c | 29
-rw-r--r-- arch/loongarch/kernel/kdebugfs.c | 168
-rw-r--r-- arch/loongarch/kernel/kfpu.c | 22
-rw-r--r-- arch/loongarch/kernel/kgdb.c | 5
-rw-r--r-- arch/loongarch/kernel/kprobes.c | 4
-rw-r--r-- arch/loongarch/kernel/lbt.S | 4
-rw-r--r-- arch/loongarch/kernel/machine_kexec.c | 6
-rw-r--r-- arch/loongarch/kernel/mcount.S | 17
-rw-r--r-- arch/loongarch/kernel/mcount_dyn.S | 14
-rw-r--r-- arch/loongarch/kernel/module.c | 6
-rw-r--r-- arch/loongarch/kernel/numa.c | 27
-rw-r--r-- arch/loongarch/kernel/paravirt.c | 335
-rw-r--r-- arch/loongarch/kernel/perf_event.c | 17
-rw-r--r-- arch/loongarch/kernel/proc.c | 39
-rw-r--r-- arch/loongarch/kernel/process.c | 47
-rw-r--r-- arch/loongarch/kernel/ptrace.c | 56
-rw-r--r-- arch/loongarch/kernel/relocate.c | 52
-rw-r--r-- arch/loongarch/kernel/reset.c | 6
-rw-r--r-- arch/loongarch/kernel/setup.c | 27
-rw-r--r-- arch/loongarch/kernel/signal.c | 21
-rw-r--r-- arch/loongarch/kernel/smp.c | 140
-rw-r--r-- arch/loongarch/kernel/switch.S | 2
-rw-r--r-- arch/loongarch/kernel/syscall.c | 23
-rw-r--r-- arch/loongarch/kernel/time.c | 24
-rw-r--r-- arch/loongarch/kernel/traps.c | 34
-rw-r--r-- arch/loongarch/kernel/unaligned.c | 8
-rw-r--r-- arch/loongarch/kernel/unwind_orc.c | 4
-rw-r--r-- arch/loongarch/kernel/uprobes.c | 11
-rw-r--r-- arch/loongarch/kernel/vdso.c | 100
-rw-r--r-- arch/loongarch/kernel/vmlinux.lds.S | 18
-rw-r--r-- arch/loongarch/kvm/Kconfig | 7
-rw-r--r-- arch/loongarch/kvm/Makefile | 8
-rw-r--r-- arch/loongarch/kvm/exit.c | 359
-rw-r--r-- arch/loongarch/kvm/intc/eiointc.c | 1027
-rw-r--r-- arch/loongarch/kvm/intc/ipi.c | 479
-rw-r--r-- arch/loongarch/kvm/intc/pch_pic.c | 519
-rw-r--r-- arch/loongarch/kvm/irqfd.c | 89
-rw-r--r-- arch/loongarch/kvm/main.c | 60
-rw-r--r-- arch/loongarch/kvm/mmu.c | 135
-rw-r--r-- arch/loongarch/kvm/switch.S | 18
-rw-r--r-- arch/loongarch/kvm/timer.c | 14
-rw-r--r-- arch/loongarch/kvm/tlb.c | 5
-rw-r--r-- arch/loongarch/kvm/trace.h | 20
-rw-r--r-- arch/loongarch/kvm/vcpu.c | 657
-rw-r--r-- arch/loongarch/kvm/vm.c | 107
-rw-r--r-- arch/loongarch/lib/Makefile | 4
-rw-r--r-- arch/loongarch/lib/crc32-loongarch.c | 135
-rw-r--r-- arch/loongarch/lib/csum.c | 2
-rw-r--r-- arch/loongarch/lib/tishift.S | 56
-rw-r--r-- arch/loongarch/mm/Makefile | 3
-rw-r--r-- arch/loongarch/mm/fault.c | 41
-rw-r--r-- arch/loongarch/mm/hugetlbpage.c | 20
-rw-r--r-- arch/loongarch/mm/init.c | 55
-rw-r--r-- arch/loongarch/mm/kasan_init.c | 56
-rw-r--r-- arch/loongarch/mm/mmap.c | 14
-rw-r--r-- arch/loongarch/mm/pageattr.c | 238
-rw-r--r-- arch/loongarch/mm/pgtable.c | 28
-rw-r--r-- arch/loongarch/mm/tlb.c | 2
-rw-r--r-- arch/loongarch/mm/tlbex.S | 9
-rw-r--r-- arch/loongarch/net/bpf_jit.c | 42
-rw-r--r-- arch/loongarch/net/bpf_jit.h | 5
-rw-r--r-- arch/loongarch/pci/acpi.c | 1
-rw-r--r-- arch/loongarch/power/hibernate.c | 3
-rw-r--r-- arch/loongarch/power/platform.c | 39
-rw-r--r-- arch/loongarch/power/suspend.c | 4
-rw-r--r-- arch/loongarch/power/suspend_asm.S | 18
-rw-r--r-- arch/loongarch/vdso/Makefile | 23
-rw-r--r-- arch/loongarch/vdso/vdso.lds.S | 7
-rw-r--r-- arch/loongarch/vdso/vgetcpu.c | 12
-rw-r--r-- arch/loongarch/vdso/vgetrandom-chacha.S | 253
-rw-r--r-- arch/loongarch/vdso/vgetrandom.c | 10
179 files changed, 7238 insertions(+), 1855 deletions(-)
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
index bfa21465d83a..beb8499dd8ed 100644
--- a/arch/loongarch/Kbuild
+++ b/arch/loongarch/Kbuild
@@ -4,7 +4,6 @@ obj-y += net/
obj-y += vdso/
obj-$(CONFIG_KVM) += kvm/
-obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
# for cleaning
subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 54ad04dacdee..1a2cf012b8f2 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -15,13 +15,23 @@ config LOONGARCH
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_CRC32
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
+ select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PREEMPT_LAZY
+ select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
+ select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
@@ -56,28 +66,36 @@ config LOONGARCH
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_NO_INSTR
+ select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BUILDTIME_TABLE_SORT
select COMMON_CLK
select CPU_PM
+ select EDAC_SUPPORT
select EFI
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -91,6 +109,7 @@ config LOONGARCH
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select GPIOLIB
select HAS_IOPORT
@@ -101,6 +120,7 @@ config LOONGARCH
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -114,16 +134,18 @@ config LOONGARCH
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ select HAVE_FTRACE_REGS_HAVING_PT_REGS
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
+ select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
- select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
@@ -138,11 +160,12 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC
@@ -156,7 +179,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -174,7 +197,6 @@ config LOONGARCH
select PCI_QUIRKS
select PERF_USE_VMALLOC
select RTC_LIB
- select SMP
select SPARSE_IRQ
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_ARCH_UNALIGN_NO_WARN
@@ -183,6 +205,7 @@ config LOONGARCH
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
select USER_STACKTRACE_SUPPORT
+ select VDSO_GETRANDOM
select ZONE_DMA32
config 32BIT
@@ -230,7 +253,7 @@ config MACH_LOONGSON64
def_bool 64BIT
config FIX_EARLYCON_MEM
- def_bool y
+ def_bool !ARCH_IOREMAP
config PGTABLE_2LEVEL
bool
@@ -257,6 +280,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
+config AS_HAS_THIN_ADD_SUB
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
+
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
@@ -269,6 +295,9 @@ config AS_HAS_LBT_EXTENSION
config AS_HAS_LVZ_EXTENSION
def_bool $(as-instr,hvcl 0)
+config CC_HAS_ANNOTATE_TABLEJUMP
+ def_bool $(cc-option,-mannotate-tablejump)
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@@ -360,8 +389,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-
+ line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
@@ -378,6 +407,7 @@ endchoice
config BUILTIN_DTB
bool "Enable built-in dtb in kernel"
depends on OF
+ select GENERIC_BUILTIN_DTB
help
Some existing systems do not provide a canonical device tree to
the kernel at boot time. Let's provide a device tree table in the
@@ -420,6 +450,7 @@ config EFI_STUB
config SCHED_SMT
bool "SMT scheduler support"
+ depends on SMP
default y
help
Improves scheduler's performance when there are multiple
@@ -465,7 +496,6 @@ config NR_CPUS
config NUMA
bool "NUMA Support"
select SMP
- select ACPI_NUMA if ACPI
help
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
support. This option improves performance on systems with more
@@ -589,6 +619,9 @@ config ARCH_SUPPORTS_KEXEC
config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
+config ARCH_DEFAULT_CRASH_DUMP
+ def_bool y
+
config ARCH_SELECTS_CRASH_DUMP
def_bool y
depends on CRASH_DUMP
@@ -599,6 +632,7 @@ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
config RELOCATABLE
bool "Relocatable kernel"
+ select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required, so as to relocate
@@ -632,6 +666,26 @@ config RANDOMIZE_BASE_MAX_OFFSET
source "kernel/livepatch/Kconfig"
+config PARAVIRT
+ bool "Enable paravirtualization code"
+ depends on AS_HAS_LVZ_EXTENSION
+ help
+ This changes the kernel so it can modify itself when it is run
+ under a hypervisor, potentially improving performance significantly
+ over full virtualization. However, when run without a hypervisor
+ the kernel is theoretically slower and slightly larger.
+
+config PARAVIRT_TIME_ACCOUNTING
+ bool "Paravirtual steal time accounting"
+ depends on PARAVIRT
+ help
+ Select this option to enable fine granularity task steal time
+ accounting. Time spent executing other tasks in parallel with
+ the current vCPU is discounted from the vCPU power. To account for
+ that, there can be a small performance impact.
+
+ If in doubt, say N here.
+
endmenu
config ARCH_SELECT_MEMORY_MODEL
@@ -682,6 +736,7 @@ config ARCH_HIBERNATION_POSSIBLE
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
+source "drivers/cpufreq/Kconfig"
endmenu
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index 98d60630c3d4..8b2ce5b5d43e 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
+ depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index df6caf79537a..0304eabbe606 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -26,6 +26,9 @@ endif
32bit-emul = elf32loongarch
64bit-emul = elf64loongarch
+CC_FLAGS_FPU := -mfpu=64
+CC_FLAGS_NO_FPU := -msoft-float
+
ifdef CONFIG_UNWINDER_ORC
orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h
orc_hash_sh := $(srctree)/scripts/orc_hash.sh
@@ -56,10 +59,10 @@ endif
ifdef CONFIG_64BIT
ld-emul = $(64bit-emul)
-cflags-y += -mabi=lp64s
+cflags-y += -mabi=lp64s -mcmodel=normal
endif
-cflags-y += -pipe -msoft-float
+cflags-y += -pipe $(CC_FLAGS_NO_FPU)
LDFLAGS_vmlinux += -static -n -nostdlib
# When the assembler supports explicit relocation hint, we must use it.
@@ -98,11 +101,16 @@ KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
ifdef CONFIG_OBJTOOL
-KBUILD_CFLAGS += -fno-jump-tables
+ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_CFLAGS += -mannotate-tablejump
+else
+KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
+endif
endif
-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
-KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
+KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small
+KBUILD_RUSTFLAGS_KERNEL += -Zdirect-access-external-data=yes
+KBUILD_RUSTFLAGS_MODULE += -Zdirect-access-external-data=no
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
index 747d0c3f6389..15d5e14fe418 100644
--- a/arch/loongarch/boot/dts/Makefile
+++ b/arch/loongarch/boot/dts/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
index 8aefb0c12672..a34734a6c3ce 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -44,14 +44,14 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x0>;
};
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x1>;
};
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 444779c21034..3b38ff8853a7 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
/ {
#address-cells = <2>;
@@ -19,14 +20,15 @@
compatible = "loongson,la264";
device_type = "cpu";
reg = <0x0>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
};
- cpu_clk: cpu-clk {
+ ref_100m: clock-ref-100m {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <500000000>;
+ clock-frequency = <100000000>;
+ clock-output-names = "ref_100m";
};
cpuintc: interrupt-controller {
@@ -35,6 +37,28 @@
interrupt-controller;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+
+ trips {
+ cpu-alert {
+ temperature = <33000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu-crit {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
bus@10000000 {
compatible = "simple-bus";
ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
@@ -52,6 +76,54 @@
ranges = <1 0x0 0x0 0x16400000 0x4000>;
};
+ clk: clock-controller@1fe10400 {
+ compatible = "loongson,ls2k0500-clk";
+ reg = <0x0 0x1fe10400 0x0 0x2c>;
+ #clock-cells = <1>;
+ clocks = <&ref_100m>;
+ clock-names = "ref_100m";
+ };
+
+ dma-controller@1fe10c00 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c00 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <67>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c10 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c10 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <68>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c20 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c20 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <69>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c30 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c30 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <70>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
liointc0: interrupt-controller@1fe11400 {
compatible = "loongson,liointc-2.0";
reg = <0x0 0x1fe11400 0x0 0x40>,
@@ -139,6 +211,14 @@
status = "disabled";
};
+ tsensor: thermal-sensor@1fe11500 {
+ compatible = "loongson,ls2k0500-thermal", "loongson,ls2k1000-thermal";
+ reg = <0x0 0x1fe11500 0x0 0x30>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
+ };
+
uart0: serial@1ff40800 {
compatible = "ns16550a";
reg = <0x0 0x1ff40800 0x0 0x10>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index ed4d32434041..3514ea78f525 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -43,7 +43,7 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy0>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -58,7 +58,7 @@
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy1>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -90,11 +90,6 @@
#address-cells = <1>;
#size-cells = <0>;
- spidev@0 {
- compatible = "rohm,dh2228fv";
- spi-max-frequency = <100000000>;
- reg = <0>;
- };
};
&ehci0 {
@@ -113,10 +108,6 @@
status = "okay";
};
-&clk {
- status = "okay";
-};
-
&rtc0 {
status = "okay";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index b6aeb1f70e2a..8dff2aa52417 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -159,7 +159,6 @@
#clock-cells = <1>;
clocks = <&ref_100m>;
clock-names = "ref_100m";
- status = "disabled";
};
gpio0: gpio@1fe00500 {
@@ -267,7 +266,7 @@
status = "disabled";
};
- dma-controller@1fe00c20 {
+ apbdma2: dma-controller@1fe00c20 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c20 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -277,7 +276,7 @@
status = "disabled";
};
- dma-controller@1fe00c30 {
+ apbdma3: dma-controller@1fe00c30 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c30 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -353,6 +352,19 @@
status = "disabled";
};
+ i2s: i2s@1fe2d000 {
+ compatible = "loongson,ls2k1000-i2s";
+ reg = <0 0x1fe2d000 0 0x14>,
+ <0 0x1fe00438 0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ dmas = <&apbdma2 0>, <&apbdma3 0>;
+ dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
spi0: spi@1fff0220 {
compatible = "loongson,ls2k1000-spi";
reg = <0x0 0x1fff0220 0x0 0x10>;
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index 74b99bd234cc..ea9e6985d0e9 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -92,7 +92,7 @@
&gmac2 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy2>;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index 9eab2d02cbe8..b4ff55a33e90 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
/ {
#address-cells = <2>;
@@ -19,21 +20,22 @@
compatible = "loongson,la364";
device_type = "cpu";
reg = <0x0>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
cpu1: cpu@2 {
compatible = "loongson,la364";
device_type = "cpu";
reg = <0x1>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
};
- cpu_clk: cpu-clk {
+ ref_100m: clock-ref-100m {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <1400000000>;
+ clock-frequency = <100000000>;
+ clock-output-names = "ref_100m";
};
cpuintc: interrupt-controller {
@@ -42,6 +44,28 @@
interrupt-controller;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+
+ trips {
+ cpu-alert {
+ temperature = <40000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu-crit {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
bus@10000000 {
compatible = "simple-bus";
ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
@@ -58,6 +82,14 @@
ranges = <1 0x0 0x0 0x18400000 0x4000>;
};
+ clk: clock-controller@10010480 {
+ compatible = "loongson,ls2k2000-clk";
+ reg = <0x0 0x10010480 0x0 0x100>;
+ #clock-cells = <1>;
+ clocks = <&ref_100m>;
+ clock-names = "ref_100m";
+ };
+
pmc: power-management@100d0000 {
compatible = "loongson,ls2k2000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x100d0000 0x0 0x58>;
@@ -80,6 +112,15 @@
};
};
+ tsensor: thermal-sensor@1fe01460 {
+ compatible = "loongson,ls2k2000-thermal";
+ reg = <0x0 0x1fe01460 0x0 0x30>,
+ <0x0 0x1fe0019c 0x0 0x4>;
+ interrupt-parent = <&liointc>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
+ };
+
liointc: interrupt-controller@1fe01400 {
compatible = "loongson,liointc-1.0";
reg = <0x0 0x1fe01400 0x0 0x64>;
@@ -132,6 +173,22 @@
status = "disabled";
};
+ i2c@1fe00120 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe00120 0x0 0x8>;
+ interrupt-parent = <&liointc>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c@1fe00130 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe00130 0x0 0x8>;
+ interrupt-parent = <&liointc>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
uart0: serial@1fe001e0 {
compatible = "ns16550a";
reg = <0x0 0x1fe001e0 0x0 0x10>;
@@ -202,9 +259,11 @@
status = "disabled";
};
- hda@7,0 {
+ i2s@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
- interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <78 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
interrupt-parent = <&pic>;
status = "disabled";
};
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index f18c2ba871ef..0d59af6007b7 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_ZSTD=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
@@ -14,14 +15,18 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -66,6 +71,14 @@ CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_PCI_SLOT=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_BGRT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_LOONGSON3_CPUFREQ=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -74,14 +87,18 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_MODULE_COMPRESS=y
+CONFIG_MODULE_COMPRESS_ZSTD=y
+CONFIG_MODULE_DECOMPRESS=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
-CONFIG_BLK_DEV_THROTTLING_LOW=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_FC_APPID=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_UNIXWARE_DISKLABEL=y
@@ -92,12 +109,13 @@ CONFIG_BINFMT_MISC=m
CONFIG_ZPOOL=y
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
-CONFIG_ZBUD=y
-CONFIG_Z3FOLD=y
-CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
+CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
@@ -130,13 +148,22 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BBR=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_ESPINTCP=y
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_MROUTE=y
CONFIG_MPTCP=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
@@ -152,6 +179,8 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_CT=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -164,6 +193,7 @@ CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
@@ -260,6 +290,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -280,6 +311,7 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -397,7 +429,16 @@ CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_LZ4HC=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_DEFLATE=y
+CONFIG_ZRAM_BACKEND_842=y
+CONFIG_ZRAM_BACKEND_LZO=y
CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_MULTI_COMP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
@@ -417,6 +458,9 @@ CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_TCP=m
CONFIG_EEPROM_AT24=m
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_MMIO=m
+CONFIG_PVPANIC_PCI=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
@@ -454,12 +498,10 @@ CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MD_MULTIPATH=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
@@ -473,6 +515,16 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
+CONFIG_DM_INIT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_DM_VDO=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -484,6 +536,13 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=y
CONFIG_WIREGUARD=m
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
@@ -550,6 +609,7 @@ CONFIG_NGBE=y
CONFIG_TXGBE=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_MOTORCOMM_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -563,12 +623,14 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
CONFIG_USB_NET_CDC_EEM=m
CONFIG_USB_NET_HUAWEI_CDC_NCM=m
CONFIG_USB_NET_CDC_MBIM=m
# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_RNDIS_HOST=m
# CONFIG_USB_BELKIN is not set
# CONFIG_USB_ARMLINUX is not set
# CONFIG_USB_NET_ZAURUS is not set
@@ -577,10 +639,11 @@ CONFIG_ATH9K_HTC=m
CONFIG_IWLWIFI=m
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
-CONFIG_HOSTAP=m
CONFIG_MT7601U=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
CONFIG_RTL8192CE=m
CONFIG_RTL8192SE=m
CONFIG_RTL8192DE=m
@@ -590,18 +653,30 @@ CONFIG_RTL8188EE=m
CONFIG_RTL8192EE=m
CONFIG_RTL8821AE=m
CONFIG_RTL8192CU=m
+CONFIG_RTL8192DU=m
# CONFIG_RTLWIFI_DEBUG is not set
CONFIG_RTL8XXXU=m
CONFIG_RTW88=m
CONFIG_RTW88_8822BE=m
+CONFIG_RTW88_8822BU=m
CONFIG_RTW88_8822CE=m
+CONFIG_RTW88_8822CU=m
CONFIG_RTW88_8723DE=m
+CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
+CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
+CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
+CONFIG_RTW89_8852BE=m
+CONFIG_RTW89_8852BTE=m
CONFIG_RTW89_8852CE=m
+CONFIG_RTW89_8922AE=m
CONFIG_ZD1211RW=m
-CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_USB4_NET=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
@@ -634,6 +709,9 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PCI=y
CONFIG_I2C_GPIO=y
CONFIG_I2C_LS2X=y
CONFIG_SPI=y
@@ -674,6 +752,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -687,6 +766,7 @@ CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -710,11 +790,22 @@ CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_ES8311=m
+CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8323=m
+CONFIG_SND_SOC_ES8326=m
+CONFIG_SND_SOC_ES8328_I2C=m
+CONFIG_SND_SOC_ES8328_SPI=m
+CONFIG_SND_SOC_UDA1334=m
+CONFIG_SND_SOC_UDA1342=m
CONFIG_SND_VIRTIO=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
CONFIG_HID_A4TECH=m
CONFIG_HID_CHERRY=m
+CONFIG_HID_ELAN=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_LOGITECH_FF=y
@@ -723,7 +814,11 @@ CONFIG_LOGIG940_FF=y
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_SUNPLUS=m
+CONFIG_HID_WACOM=m
CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_MON=y
@@ -754,11 +849,14 @@ CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
CONFIG_DMADEVICES=y
-CONFIG_LS2X_APB_DMA=y
+CONFIG_LOONGSON2_APB_DMA=y
CONFIG_UDMABUF=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_HEAPS_SYSTEM=y
@@ -811,6 +909,7 @@ CONFIG_NTB_SWITCHTEC=m
CONFIG_NTB_PERF=m
CONFIG_NTB_TRANSPORT=m
CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
CONFIG_USB4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -834,6 +933,9 @@ CONFIG_F2FS_FS=m
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_CHECK_FS=y
CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
@@ -876,23 +978,23 @@ CONFIG_UBIFS_FS=m
CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_PSTORE=m
-CONFIG_PSTORE_LZO_COMPRESS=m
-CONFIG_PSTORE_LZ4_COMPRESS=m
-CONFIG_PSTORE_LZ4HC_COMPRESS=m
-CONFIG_PSTORE_842_COMPRESS=y
-CONFIG_PSTORE_ZSTD_COMPRESS=y
-CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
-CONFIG_SYSV_FS=m
+CONFIG_PSTORE_COMPRESS=y
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ZIP_ZSTD=y
+CONFIG_EROFS_FS_ONDEMAND=y
CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
@@ -924,7 +1026,7 @@ CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_ANUBIS=m
@@ -937,7 +1039,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
@@ -948,7 +1049,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_DMA_CMA=y
CONFIG_DMA_NUMA_CMA=y
@@ -961,3 +1061,4 @@ CONFIG_DEBUG_FS=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_UNWINDER_ORC=y
diff --git a/arch/loongarch/crypto/Kconfig b/arch/loongarch/crypto/Kconfig
index 200a6e8b43b1..a0270b3e5b30 100644
--- a/arch/loongarch/crypto/Kconfig
+++ b/arch/loongarch/crypto/Kconfig
@@ -2,13 +2,4 @@
menu "Accelerated Cryptographic Algorithms for CPU (loongarch)"
-config CRYPTO_CRC32_LOONGARCH
- tristate "CRC32c and CRC32"
- select CRC32
- select CRYPTO_HASH
- help
- CRC32c and CRC32 CRC algorithms
-
- Architecture: LoongArch with CRC32 instructions
-
endmenu
diff --git a/arch/loongarch/crypto/Makefile b/arch/loongarch/crypto/Makefile
index d22613d27ce9..ba83755dde2b 100644
--- a/arch/loongarch/crypto/Makefile
+++ b/arch/loongarch/crypto/Makefile
@@ -2,5 +2,3 @@
#
# Makefile for LoongArch crypto files..
#
-
-obj-$(CONFIG_CRYPTO_CRC32_LOONGARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
deleted file mode 100644
index 3eebea3a7b47..000000000000
--- a/arch/loongarch/crypto/crc32-loongarch.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * crc32.c - CRC32 and CRC32C using LoongArch crc* instructions
- *
- * Module based on mips/crypto/crc32-mips.c
- *
- * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
- * Copyright (C) 2018 MIPS Tech, LLC
- * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
- */
-
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-
-#include <asm/cpu-features.h>
-#include <asm/unaligned.h>
-
-#define _CRC32(crc, value, size, type) \
-do { \
- __asm__ __volatile__( \
- #type ".w." #size ".w" " %0, %1, %0\n\t"\
- : "+r" (crc) \
- : "r" (value) \
- : "memory"); \
-} while (0)
-
-#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
-#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
-
-static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32(crc, value, b);
- }
-
- return crc;
-}
-
-static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32C(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32C(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32C(crc, value, b);
- }
-
- return crc;
-}
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-struct chksum_ctx {
- u32 key;
-};
-
-struct chksum_desc_ctx {
- u32 crc;
-};
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = mctx->key;
-
- return 0;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set the seed.
- */
-static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(mctx->key))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksumc_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32c_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(ctx->crc, out);
- return 0;
-}
-
-static int chksumc_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(~ctx->crc, out);
- return 0;
-}
-
-static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(crc32_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(~crc32c_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksumc_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksumc_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksum_finup(mctx->key, data, length, out);
-}
-
-static int chksumc_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksumc_finup(mctx->key, data, length, out);
-}
-
-static int chksum_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- return 0;
-}
-
-static int chksumc_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = ~0;
- return 0;
-}
-
-static struct shash_alg crc32_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksum_cra_init,
- }
-};
-
-static struct shash_alg crc32c_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksumc_update,
- .final = chksumc_final,
- .finup = chksumc_finup,
- .digest = chksumc_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksumc_cra_init,
- }
-};
-
-static int __init crc32_mod_init(void)
-{
- int err;
-
- if (!cpu_has(CPU_FEATURE_CRC32))
- return 0;
-
- err = crypto_register_shash(&crc32_alg);
- if (err)
- return err;
-
- err = crypto_register_shash(&crc32c_alg);
- if (err)
- return err;
-
- return 0;
-}
-
-static void __exit crc32_mod_exit(void)
-{
- if (!cpu_has(CPU_FEATURE_CRC32))
- return;
-
- crypto_unregister_shash(&crc32_alg);
- crypto_unregister_shash(&crc32c_alg);
-}
-
-module_init(crc32_mod_init);
-module_exit(crc32_mod_exit);
-
-MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
-MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
-MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2dbec7853ae8..80ddb5edb845 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,29 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_64.h
generated-y += orc_hash.h
-generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
-generic-y += qspinlock.h
-generic-y += rwsem.h
-generic-y += segment.h
generic-y += user.h
-generic-y += stat.h
-generic-y += fcntl.h
generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
+generic-y += mmzone.h
generic-y += statfs.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += poll.h
generic-y += param.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += kvm_para.h
+generic-y += text-patching.h
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 49e29b29996f..313f66f7913a 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -8,6 +8,7 @@
#ifndef _ASM_LOONGARCH_ACPI_H
#define _ASM_LOONGARCH_ACPI_H
+#include <asm/smp.h>
#include <asm/suspend.h>
#ifdef CONFIG_ACPI
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 7bd47d65bf7a..fe198b473f84 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -37,6 +37,10 @@ extern unsigned long vm_map_base;
#define UNCACHE_BASE CSR_DMW0_BASE
#endif
+#ifndef WRITECOMBINE_BASE
+#define WRITECOMBINE_BASE CSR_DMW2_BASE
+#endif
+
#define DMW_PABITS 48
#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index cf8e1a4e7c19..704066b4f736 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -6,3 +6,17 @@
#include <asm/page.h>
#include <asm/ftrace.h>
#include <asm-generic/asm-prototypes.h>
+
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+__int128_t __ashlti3(__int128_t a, int b);
+__int128_t __ashrti3(__int128_t a, int b);
+__int128_t __lshrti3(__int128_t a, int b);
+#endif
+
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs);
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg);
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 655db7d7a427..8d7f501b0a12 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -609,6 +609,7 @@
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
+ .p2align 3
.dword 766b
.dword \sym
.popsection
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 99af8b3160a8..c86f0ab922ec 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -15,6 +15,7 @@
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
+#define __AMOR "amor.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
@@ -22,6 +23,7 @@
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
+#define __AMOR "amor.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
index 6d5846dd075c..7657e016233f 100644
--- a/arch/loongarch/include/asm/bootinfo.h
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -26,6 +26,10 @@ struct loongson_board_info {
#define NR_WORDS DIV_ROUND_UP(NR_CPUS, BITS_PER_LONG)
+/*
+ * The "core" of cores_per_node and cores_per_package stands for a
+ * logical core, which means in a SMT system it stands for a thread.
+ */
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
index 08388876ade4..f6f254f2c5db 100644
--- a/arch/loongarch/include/asm/bug.h
+++ b/arch/loongarch/include/asm/bug.h
@@ -4,6 +4,7 @@
#include <asm/break.h>
#include <linux/stringify.h>
+#include <linux/objtool.h>
#ifndef CONFIG_DEBUG_BUGVERBOSE
#define _BUGVERBOSE_LOCATION(file, line)
@@ -33,25 +34,25 @@
#define ASM_BUG_FLAGS(flags) \
__BUG_ENTRY(flags) \
- break BRK_BUG
+ break BRK_BUG;
#define ASM_BUG() ASM_BUG_FLAGS(0)
-#define __BUG_FLAGS(flags) \
- asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)));
+#define __BUG_FLAGS(flags, extra) \
+ asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)) \
+ extra);
#define __WARN_FLAGS(flags) \
do { \
instrumentation_begin(); \
- __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
- annotate_reachable(); \
+ __BUG_FLAGS(BUGFLAG_WARNING|(flags), ANNOTATE_REACHABLE(10001b));\
instrumentation_end(); \
} while (0)
#define BUG() \
do { \
instrumentation_begin(); \
- __BUG_FLAGS(0); \
+ __BUG_FLAGS(0, ""); \
unreachable(); \
} while (0)
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index 2eafe6a6aca8..fc83bb32f9f0 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -51,6 +51,7 @@
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
@@ -65,5 +66,7 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
+#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
index 900589cb159d..7f5bc0ad9d50 100644
--- a/arch/loongarch/include/asm/cpu-info.h
+++ b/arch/loongarch/include/asm/cpu-info.h
@@ -57,6 +57,7 @@ struct cpuinfo_loongarch {
int global_id; /* physical global thread number */
int vabits; /* Virtual Address size in bits */
int pabits; /* Physical Address size in bits */
+ int timerbits; /* Width of arch timer in bits */
unsigned int ksave_mask; /* Usable KSave mask. */
unsigned int watch_dreg_count; /* Number data breakpoints */
unsigned int watch_ireg_count; /* Number instruction breakpoints */
@@ -75,27 +76,6 @@ extern const char *__cpu_full_name[];
#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
-struct seq_file;
-struct notifier_block;
-
-extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
-extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
-
-#define proc_cpuinfo_notifier(fn, pri) \
-({ \
- static struct notifier_block fn##_nb = { \
- .notifier_call = fn, \
- .priority = pri \
- }; \
- \
- register_proc_cpuinfo_notifier(&fn##_nb); \
-})
-
-struct proc_cpuinfo_notifier_args {
- struct seq_file *m;
- unsigned long n;
-};
-
static inline bool cpus_are_siblings(int cpua, int cpub)
{
struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 48b9f7168bcc..98cf4d7b4b0a 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -87,18 +87,21 @@ enum cpu_type_enum {
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
-#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */
-#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
-#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
+#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
+#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP 21 /* CPU has perfermance counter */
+#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
+#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
+#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -114,6 +117,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
@@ -127,5 +131,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
+#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */
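
The new feature bits above are consumed through the cpu_has_* wrappers from cpu-features.h. A minimal, hypothetical sketch (helper name and message are made up, not part of this patch) of gating advanced-vector interrupt setup on the new AVECINT capability:

static void __init setup_avec_irqs(void)
{
	/* hypothetical boot-time check: skip AVEC setup on cores without it */
	if (!cpu_has_avecint)
		return;

	pr_info("AVEC interrupt mode available, enabling vectored delivery\n");
	/* controller-specific programming would follow here */
}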
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
deleted file mode 100644
index 75ccd808a2af..000000000000
--- a/arch/loongarch/include/asm/dma-direct.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _LOONGARCH_DMA_DIRECT_H
-#define _LOONGARCH_DMA_DIRECT_H
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/fprobe.h b/arch/loongarch/include/asm/fprobe.h
new file mode 100644
index 000000000000..7af3b3126caf
--- /dev/null
+++ b/arch/loongarch/include/asm/fprobe.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_FPROBE_H
+#define _ASM_LOONGARCH_FPROBE_H
+
+/*
+ * Explicitly undef ARCH_DEFINE_ENCODE_FPROBE_HEADER, because loongarch does not
+ * have enough fixed MSBs in kernel object addresses to encode the size of data
+ * in fprobe_header. Use the 2-entry encoding instead.
+ */
+#undef ARCH_DEFINE_ENCODE_FPROBE_HEADER
+
+#endif /* _ASM_LOONGARCH_FPROBE_H */
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
index c2d8962fda00..45514f314664 100644
--- a/arch/loongarch/include/asm/fpu.h
+++ b/arch/loongarch/include/asm/fpu.h
@@ -21,22 +21,30 @@
struct sigcontext;
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
-
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
-
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+#define kernel_fpu_available() cpu_has_fpu
+
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
+
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
static inline void enable_lsx(void);
static inline void disable_lsx(void);
diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h
index de891c2c83d4..6e0a99763a9a 100644
--- a/arch/loongarch/include/asm/ftrace.h
+++ b/arch/loongarch/include/asm/ftrace.h
@@ -28,7 +28,6 @@ struct dyn_ftrace;
struct dyn_arch_ftrace { };
#define ARCH_SUPPORTS_FTRACE_OPS 1
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ftrace_init_nop ftrace_init_nop
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
@@ -45,39 +44,28 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent);
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
struct ftrace_ops;
-struct ftrace_regs {
- struct pt_regs regs;
-};
+#include <linux/ftrace_regs.h>
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
- return &fregs->regs;
-}
-
-static __always_inline unsigned long
-ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
-{
- return instruction_pointer(&fregs->regs);
+ return &arch_ftrace_regs(fregs)->regs;
}
static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
{
- instruction_pointer_set(&fregs->regs, ip);
+ instruction_pointer_set(&arch_ftrace_regs(fregs)->regs, ip);
}
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(&(fregs)->regs, n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(&(fregs)->regs)
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(&(fregs)->regs)
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(&(fregs)->regs, ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(&(fregs)->regs)
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
+#undef ftrace_regs_get_frame_pointer
+#define ftrace_regs_get_frame_pointer(fregs) \
+ (arch_ftrace_regs(fregs)->regs.regs[22])
+
+static __always_inline unsigned long
+ftrace_regs_get_return_address(struct ftrace_regs *fregs)
+{
+ return *(unsigned long *)(arch_ftrace_regs(fregs)->regs.regs[1]);
+}
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -91,7 +79,7 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
}
#define arch_ftrace_set_direct_caller(fregs, addr) \
- __arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+ __arch_ftrace_set_direct_caller(&arch_ftrace_regs(fregs)->regs, addr)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#endif
@@ -100,26 +88,4 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
#endif /* CONFIG_FUNCTION_TRACER */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- /* a0 - a1 */
- unsigned long regs[2];
-
- unsigned long fp;
- unsigned long __unused;
-};
-
-static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->regs[0];
-}
-
-static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->fp;
-}
-#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
-#endif
-
#endif /* _ASM_LOONGARCH_FTRACE_H */
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index 0ef3b18f8980..10da8d6961cb 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -12,11 +12,19 @@
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
-#define NR_IPI 2
+#define NR_IPI 4
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNCTION,
+ IPI_IRQ_WORK,
+ IPI_CLEAR_VECTOR,
+};
typedef struct {
unsigned int ipi_irqs[NR_IPI];
unsigned int __softirq_pending;
+ atomic_t message ____cacheline_aligned_in_smp;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
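
A hedged sketch, not part of this patch, of how the per-CPU atomic message word and the new ipi_msg_type values could be drained by the IPI handler, one bit per message type:

static void handle_ipi_messages(void)
{
	unsigned int action;

	/* atomically take every pending message bit for this CPU */
	action = atomic_xchg(&this_cpu_ptr(&irq_stat)->message, 0);

	if (action & BIT(IPI_RESCHEDULE))
		scheduler_ipi();
	if (action & BIT(IPI_CALL_FUNCTION))
		generic_smp_call_function_interrupt();
	if (action & BIT(IPI_IRQ_WORK))
		irq_work_run();
}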
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index aa44b3fe43dd..4dc4b3e04225 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -16,12 +16,7 @@ static inline int prepare_hugepage_range(struct file *file,
unsigned long len)
{
unsigned long task_size = STACK_TOP;
- struct hstate *h = hstate_file(file);
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
if (len > task_size)
return -ENOMEM;
if (task_size - len < addr)
@@ -29,12 +24,23 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_t clear;
+
+ pte_val(clear) = (unsigned long)invalid_pte_table;
+ set_pte_at(mm, addr, ptep, clear);
+}
+
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
pte_val(clear) = (unsigned long)invalid_pte_table;
set_pte_at(mm, addr, ptep, clear);
@@ -46,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
@@ -65,7 +72,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t pte,
int dirty)
{
- int changed = !pte_same(*ptep, pte);
+ int changed = !pte_same(ptep_get(ptep), pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index 21447fb1efc7..13b2462f3d8c 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -38,8 +38,8 @@ struct arch_hw_breakpoint {
* Limits.
* Changing these will require modifications to the register accessors.
*/
-#define LOONGARCH_MAX_BRP 8
-#define LOONGARCH_MAX_WRP 8
+#define LOONGARCH_MAX_BRP 14
+#define LOONGARCH_MAX_WRP 14
/* Virtual debug register bases. */
#define CSR_CFG_ADDR 0
@@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
+#define CTRL_PLV0_ENABLE 0x02
+#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset);
+ int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
index af4f4e8fbd85..8156ffb67415 100644
--- a/arch/loongarch/include/asm/hw_irq.h
+++ b/arch/loongarch/include/asm/hw_irq.h
@@ -9,6 +9,8 @@
extern atomic_t irq_err_count;
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE
+
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
* machines, we'll see ...
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index d8f637f9e400..3089785ca97e 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -12,6 +12,7 @@
#define INSN_NOP 0x03400000
#define INSN_BREAK 0x002a0000
+#define INSN_HVCL 0x002b8000
#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
@@ -67,6 +68,7 @@ enum reg2_op {
revhd_op = 0x11,
extwh_op = 0x16,
extwb_op = 0x17,
+ cpucfg_op = 0x1b,
iocsrrdb_op = 0x19200,
iocsrrdh_op = 0x19201,
iocsrrdw_op = 0x19202,
@@ -530,6 +532,9 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
DEF_EMIT_REG0I15_FORMAT(break, break_op)
+/* like emit_break(imm) but returns a constant expression */
+#define __emit_break(imm) ((u32)((imm) | (break_op << 15)))
+
#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
int offset) \
@@ -678,7 +683,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op)
DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
-DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
+
+static inline void emit_jirl(union loongarch_instruction *insn,
+ enum loongarch_gpr rd,
+ enum loongarch_gpr rj,
+ int offset)
+{
+ insn->reg2i16_format.opcode = jirl_op;
+ insn->reg2i16_format.immediate = offset;
+ insn->reg2i16_format.rd = rd;
+ insn->reg2i16_format.rj = rj;
+}
#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
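
As an illustration of the dedicated emit_jirl() helper above, a hypothetical caller (not taken from this patch) synthesizing a plain function return, jirl $zero, $ra, 0, into an instruction slot:

static void emit_return(union loongarch_instruction *insn)
{
	/* rd = $zero (link value discarded), rj = $ra, offset = 0 */
	emit_jirl(insn, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
}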
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index c2f9979b2979..eaff72b38dc8 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -23,16 +23,22 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#ifdef CONFIG_ARCH_IOREMAP
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- if (prot_val & _CACHE_CC)
+ switch (pgprot_val(prot) & _CACHE_MASK) {
+ case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
- else
+ case _CACHE_SUC:
return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+ case _CACHE_WUC:
+ return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
+ default:
+ return NULL;
+ }
}
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
+ ioremap_prot((offset), (size), PAGE_KERNEL_SUC)
#define iounmap(addr) ((void)(addr))
@@ -49,23 +55,13 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), \
- pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
+ wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((offset), (size), PAGE_KERNEL)
#define mmiowb() wmb()
-/*
- * String version of I/O memory access operations.
- */
-extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
-extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
-extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
-#define memset_io(c, v, l) __memset_io((c), (v), (l))
-#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
-#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
-
#define __io_aw() mmiowb()
#ifdef CONFIG_KFENCE
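
With ioremap_prot() now taking a pgprot_t, a driver that wants a write-combined mapping simply passes the matching protection and lands in the WRITECOMBINE_BASE window. A hedged sketch (the framebuffer function is hypothetical):

void __iomem *map_framebuffer(phys_addr_t base, unsigned long size)
{
	/* _CACHE_WUC in the pgprot selects the write-combine direct map */
	return ioremap_prot(base, size, PAGE_KERNEL_WUC);
}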
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 218b4da0ea90..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -39,11 +39,22 @@ void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16
+/*
+ * 256-vector mapping for AVECINTC:
+ *
+ * 0 - 15: map the classic interrupt pins (e.g. IP0-IP12).
+ * 16 - 255: vectors for external IRQs.
+ *
+ */
+#define NR_VECTORS 256
+#define NR_LEGACY_VECTORS 16
+#define IRQ_MATRIX_BITS NR_VECTORS
+
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
-#define NR_IRQS (64 + (256 * MAX_IO_PICS))
+#define MAX_IO_PICS 8
+#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
int node;
@@ -54,6 +65,7 @@ extern struct acpi_vector_group pch_group[MAX_IO_PICS];
extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define CORES_PER_EIO_NODE 4
+#define CORES_PER_VEIO_NODE 256
#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */
@@ -65,7 +77,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
#define LOONGSON_CPU_IRQ_BASE 16
-#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
+#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15)
#define LOONGSON_PCH_IRQ_BASE 64
#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
@@ -88,20 +100,8 @@ struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic;
-int liointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lio_pic *acpi_liointc);
-int eiointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_eio_pic *acpi_eiointc);
-
-int htvec_acpi_init(struct irq_domain *parent,
- struct acpi_madt_ht_pic *acpi_htvec);
-int pch_lpc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lpc_pic *acpi_pchlpc);
-int pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi);
-int pch_pic_acpi_init(struct irq_domain *parent,
- struct acpi_madt_bio_pic *acpi_pchpic);
-int find_pch_pic(u32 gsi);
+void complete_irq_moving(void);
+
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc;
@@ -117,7 +117,16 @@ extern struct fwnode_handle *liointc_handle;
extern struct fwnode_handle *pch_lpc_handle;
extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev);
+static inline int get_percpu_irq(int vector)
+{
+ struct irq_domain *d;
+
+ d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+ if (d)
+ return irq_create_mapping(d, vector);
+
+ return -EINVAL;
+}
#include <asm-generic/irq.h>
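
The new get_percpu_irq() helper maps a CPU interrupt vector through the cpuintc domain. A hedged usage sketch for wiring up the per-CPU stable timer (handler and per-CPU device names are hypothetical):

static int __init setup_constant_timer_irq(void)
{
	int irq = get_percpu_irq(INT_TI);	/* INT_TI: stable timer vector */

	if (irq < 0)
		return irq;

	return request_percpu_irq(irq, constant_timer_handler, "timer", &timer_event_device);
}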
diff --git a/arch/loongarch/include/asm/irq_work.h b/arch/loongarch/include/asm/irq_work.h
new file mode 100644
index 000000000000..d63076e9160d
--- /dev/null
+++ b/arch/loongarch/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_IRQ_WORK_H
+#define _ASM_LOONGARCH_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+
+#endif /* _ASM_LOONGARCH_IRQ_WORK_H */
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 29acfe3de3fa..8a924bd69d19 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -13,18 +13,22 @@
#define JUMP_LABEL_NOP_SIZE 4
-#define JUMP_TABLE_ENTRY \
+/* This macro is also expanded on the Rust side. */
+#define JUMP_TABLE_ENTRY(key, label) \
".pushsection __jump_table, \"aw\" \n\t" \
".align 3 \n\t" \
- ".long 1b - ., %l[l_yes] - . \n\t" \
- ".quad %0 - . \n\t" \
+ ".long 1b - ., " label " - . \n\t" \
+ ".quad " key " - . \n\t" \
".popsection \n\t"
+#define ARCH_STATIC_BRANCH_ASM(key, label) \
+ "1: nop \n\t" \
+ JUMP_TABLE_ENTRY(key, label)
+
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm goto(
- "1: nop \n\t"
- JUMP_TABLE_ENTRY
+ ARCH_STATIC_BRANCH_ASM("%0", "%l[l_yes]")
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -37,7 +41,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
{
asm goto(
"1: b %l[l_yes] \n\t"
- JUMP_TABLE_ENTRY
+ JUMP_TABLE_ENTRY("%0", "%l[l_yes]")
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
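
The arch_static_branch()/arch_static_branch_jump() helpers above back the generic static-key API, so ordinary code never writes this asm directly. A hedged example with made-up key and function names:

DEFINE_STATIC_KEY_FALSE(use_new_path);

void hot_function(void)
{
	if (static_branch_unlikely(&use_new_path))
		new_rare_path();	/* the nop is patched to a branch only when enabled */
	else
		old_common_path();
}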
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index cd6084f4e153..7f52bd31b9d4 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -16,7 +16,7 @@
#define XRANGE_SHIFT (48)
/* Valid address length */
-#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS)
/* Used for taking out the valid address */
#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
/* One segment whole address space size */
@@ -25,6 +25,7 @@
/* 64-bit segment value. */
#define XKPRANGE_UC_SEG (0x8000)
#define XKPRANGE_CC_SEG (0x9000)
+#define XKPRANGE_WC_SEG (0xa000)
#define XKVRANGE_VC_SEG (0xffff)
/* Cached */
@@ -41,20 +42,28 @@
#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+/* WriteCombine */
+#define XKPRANGE_WC_START WRITECOMBINE_BASE
+#define XKPRANGE_WC_SIZE XRANGE_SIZE
+#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
+
/* VMALLOC (Cached or UnCached) */
#define XKVRANGE_VC_START MODULES_VADDR
#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
-#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
/* KAsan shadow memory start right after vmalloc. */
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
extern bool kasan_early_stage;
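
A hedged sketch of how an address in the new XKPRANGE_WC segment would translate to its shadow, mirroring the existing CC/UC cases (illustrative, not the exact in-tree helper):

static inline void *wc_mem_to_shadow(unsigned long addr)
{
	unsigned long offset = addr - XKPRANGE_WC_START;

	return (void *)(XKPRANGE_WC_SHADOW_OFFSET + (offset >> KASAN_SHADOW_SCALE_SHIFT));
}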
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
index a6a5760da3a3..da9e93024626 100644
--- a/arch/loongarch/include/asm/kfence.h
+++ b/arch/loongarch/include/asm/kfence.h
@@ -10,6 +10,7 @@
#define _ASM_LOONGARCH_KFENCE_H
#include <linux/kfence.h>
+#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@@ -52,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *pte = virt_to_kpte(addr);
- if (WARN_ON(!pte) || pte_none(*pte))
+ if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
return false;
if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
else
- set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
preempt_disable();
local_flush_tlb_one(addr);
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
index 724ca8b7b401..4a76ce796f1f 100644
--- a/arch/loongarch/include/asm/kvm_csr.h
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
+ __v; \
})
#define gcsr_xchg(v, m, csr) \
@@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
+
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
@@ -208,4 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
csr->csrs[gid] |= val & _mask;
}
+#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
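
The new kvm_read_clear_hw_gcsr() helper saves a hardware guest CSR into the software copy and zeroes the hardware value in one step. A minimal hedged example (the chosen CSR is only for illustration):

static void sync_and_clear_estat(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
}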
diff --git a/arch/loongarch/include/asm/kvm_eiointc.h b/arch/loongarch/include/asm/kvm_eiointc.h
new file mode 100644
index 000000000000..a3a40aba8acf
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_eiointc.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_EIOINTC_H
+#define __ASM_KVM_EIOINTC_H
+
+#include <kvm/iodev.h>
+
+#define EIOINTC_IRQS 256
+#define EIOINTC_ROUTE_MAX_VCPUS 256
+#define EIOINTC_IRQS_U8_NUMS (EIOINTC_IRQS / 8)
+#define EIOINTC_IRQS_U16_NUMS (EIOINTC_IRQS_U8_NUMS / 2)
+#define EIOINTC_IRQS_U32_NUMS (EIOINTC_IRQS_U8_NUMS / 4)
+#define EIOINTC_IRQS_U64_NUMS (EIOINTC_IRQS_U8_NUMS / 8)
+/* map to ipnum per 32 irqs */
+#define EIOINTC_IRQS_NODETYPE_COUNT 16
+
+#define EIOINTC_BASE 0x1400
+#define EIOINTC_SIZE 0x900
+
+#define EIOINTC_NODETYPE_START 0xa0
+#define EIOINTC_NODETYPE_END 0xbf
+#define EIOINTC_IPMAP_START 0xc0
+#define EIOINTC_IPMAP_END 0xc7
+#define EIOINTC_ENABLE_START 0x200
+#define EIOINTC_ENABLE_END 0x21f
+#define EIOINTC_BOUNCE_START 0x280
+#define EIOINTC_BOUNCE_END 0x29f
+#define EIOINTC_ISR_START 0x300
+#define EIOINTC_ISR_END 0x31f
+#define EIOINTC_COREISR_START 0x400
+#define EIOINTC_COREISR_END 0x41f
+#define EIOINTC_COREMAP_START 0x800
+#define EIOINTC_COREMAP_END 0x8ff
+
+#define EIOINTC_VIRT_BASE (0x40000000)
+#define EIOINTC_VIRT_SIZE (0x1000)
+
+#define EIOINTC_VIRT_FEATURES (0x0)
+#define EIOINTC_HAS_VIRT_EXTENSION (0)
+#define EIOINTC_HAS_ENABLE_OPTION (1)
+#define EIOINTC_HAS_INT_ENCODE (2)
+#define EIOINTC_HAS_CPU_ENCODE (3)
+#define EIOINTC_VIRT_HAS_FEATURES ((1U << EIOINTC_HAS_VIRT_EXTENSION) \
+ | (1U << EIOINTC_HAS_ENABLE_OPTION) \
+ | (1U << EIOINTC_HAS_INT_ENCODE) \
+ | (1U << EIOINTC_HAS_CPU_ENCODE))
+#define EIOINTC_VIRT_CONFIG (0x4)
+#define EIOINTC_ENABLE (1)
+#define EIOINTC_ENABLE_INT_ENCODE (2)
+#define EIOINTC_ENABLE_CPU_ENCODE (3)
+
+#define LOONGSON_IP_NUM 8
+
+struct loongarch_eiointc {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+ struct kvm_io_device device_vext;
+ uint32_t num_cpu;
+ uint32_t features;
+ uint32_t status;
+
+ /* hardware state */
+ union nodetype {
+ u64 reg_u64[EIOINTC_IRQS_NODETYPE_COUNT / 4];
+ u32 reg_u32[EIOINTC_IRQS_NODETYPE_COUNT / 2];
+ u16 reg_u16[EIOINTC_IRQS_NODETYPE_COUNT];
+ u8 reg_u8[EIOINTC_IRQS_NODETYPE_COUNT * 2];
+ } nodetype;
+
+ /* one bit shows the state of one irq */
+ union bounce {
+ u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
+ u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
+ u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
+ u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
+ } bounce;
+
+ union isr {
+ u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
+ u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
+ u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
+ u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
+ } isr;
+ union coreisr {
+ u64 reg_u64[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U64_NUMS];
+ u32 reg_u32[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U32_NUMS];
+ u16 reg_u16[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U16_NUMS];
+ u8 reg_u8[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U8_NUMS];
+ } coreisr;
+ union enable {
+ u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
+ u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
+ u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
+ u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
+ } enable;
+
+ /* use one byte to config ipmap for 32 irqs at once */
+ union ipmap {
+ u64 reg_u64;
+ u32 reg_u32[EIOINTC_IRQS_U32_NUMS / 4];
+ u16 reg_u16[EIOINTC_IRQS_U16_NUMS / 4];
+ u8 reg_u8[EIOINTC_IRQS_U8_NUMS / 4];
+ } ipmap;
+ /* use one byte to config coremap for one irq */
+ union coremap {
+ u64 reg_u64[EIOINTC_IRQS / 8];
+ u32 reg_u32[EIOINTC_IRQS / 4];
+ u16 reg_u16[EIOINTC_IRQS / 2];
+ u8 reg_u8[EIOINTC_IRQS];
+ } coremap;
+
+ DECLARE_BITMAP(sw_coreisr[EIOINTC_ROUTE_MAX_VCPUS][LOONGSON_IP_NUM], EIOINTC_IRQS);
+ uint8_t sw_coremap[EIOINTC_IRQS];
+};
+
+int kvm_loongarch_register_eiointc_device(void);
+void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level);
+
+#endif /* __ASM_KVM_EIOINTC_H */
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 2d62f7b0d377..a3c4cc46c892 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -12,29 +12,50 @@
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
+#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ipi.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_MAX_VCPUS 256
#define KVM_MAX_CPUCFG_REGS 21
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
+#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
+#define KVM_REQ_PMU KVM_ARCH_REQ(2)
+
+#define KVM_GUESTDBG_SW_BP_MASK \
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
+#define KVM_GUESTDBG_VALID_MASK \
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
+
+#define KVM_DIRTY_LOG_MANUAL_CAPS \
+ (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
u64 hugepages;
+ u64 ipi_read_exits;
+ u64 ipi_write_exits;
+ u64 eiointc_read_exits;
+ u64 eiointc_write_exits;
+ u64 pch_pic_read_exits;
+ u64 pch_pic_write_exits;
};
struct kvm_vcpu_stat {
@@ -43,6 +64,7 @@ struct kvm_vcpu_stat {
u64 idle_exits;
u64 cpucfg_exits;
u64 signal_exits;
+ u64 hypercall_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
@@ -51,9 +73,13 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
+#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
+ /* Host PMU CSR */
+ u64 perf_ctrl[HOST_MAX_PMNUM];
+ u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
@@ -64,6 +90,31 @@ struct kvm_world_switch {
#define MAX_PGTABLE_LEVELS 4
+/*
+ * Physical CPUID is used for interrupt routing; there are different
+ * definitions of the physical cpuid on different hardware.
+ *
+ * For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512.
+ * For the IPI hardware, the max destination CPUID size is 1024.
+ * For the eiointc interrupt controller, the max destination CPUID size is 256.
+ * For the msgint interrupt controller, the max supported CPUID size is 65536.
+ *
+ * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
+ * future it will be expanded to 4096, covering at most 16 packages with up
+ * to 256 vcpus each.
+ */
+#define KVM_MAX_PHYID 256
+
+struct kvm_phyid_info {
+ struct kvm_vcpu *vcpu;
+ bool enabled;
+};
+
+struct kvm_phyid_map {
+ int max_phyid;
+ struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
+};
+
struct kvm_arch {
/* Guest physical mm */
kvm_pte_t *pgd;
@@ -71,9 +122,16 @@ struct kvm_arch {
unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
unsigned int root_level;
+ spinlock_t phyid_map_lock;
+ struct kvm_phyid_map *phyid_map;
+ /* Enabled PV features */
+ unsigned long pv_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
+ struct loongarch_ipi *ipi;
+ struct loongarch_eiointc *eiointc;
+ struct loongarch_pch_pic *pch_pic;
};
#define CSR_MAX_NUMS 0x800
@@ -97,8 +155,16 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 3)
+#define KVM_LARCH_PMU (0x1 << 4)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
+
+#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
+#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
+ BIT(KVM_FEATURE_STEAL_TIME) | \
+ BIT(KVM_FEATURE_USER_HCALL) | \
+ BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
/*
@@ -111,6 +177,9 @@ struct kvm_vcpu_arch {
/* Pointers stored here for easy accessing from assembly code */
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ /* GPA (=HVA) of PGD for secondary mmu */
+ unsigned long kvm_pgd;
+
/* Host registers preserved across guest mode execution */
unsigned long host_sp;
unsigned long host_tp;
@@ -132,10 +201,14 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
+ /* Guest max PMU CSR id */
+ int max_pmu_csrid;
+
/* GPR used as IO source/target */
u32 io_gpr;
@@ -157,6 +230,7 @@ struct kvm_vcpu_arch {
/* vcpu's vpid */
u64 vpid;
+ gpa_t flush_gpa;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
@@ -166,8 +240,17 @@ struct kvm_vcpu_arch {
int last_sched_cpu;
/* mp state */
struct kvm_mp_state mp_state;
+ /* ipi state */
+ struct ipi_state ipi_state;
/* cpucfg */
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+
+ /* paravirt steal time */
+ struct {
+ u64 guest_addr;
+ u64 last_steal;
+ struct gfn_to_hva_cache cache;
+ } st;
};
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
@@ -195,15 +278,31 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
+}
+
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+ return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -227,9 +326,7 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h
new file mode 100644
index 000000000000..060163dfb4a3
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_ipi.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_IPI_H
+#define __ASM_KVM_IPI_H
+
+#include <kvm/iodev.h>
+
+#define LARCH_INT_IPI 12
+
+struct loongarch_ipi {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+};
+
+struct ipi_state {
+ spinlock_t lock;
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[4];
+};
+
+#define IOCSR_IPI_BASE 0x1000
+#define IOCSR_IPI_SIZE 0x160
+
+#define IOCSR_IPI_STATUS 0x000
+#define IOCSR_IPI_EN 0x004
+#define IOCSR_IPI_SET 0x008
+#define IOCSR_IPI_CLEAR 0x00c
+#define IOCSR_IPI_BUF_20 0x020
+#define IOCSR_IPI_BUF_28 0x028
+#define IOCSR_IPI_BUF_30 0x030
+#define IOCSR_IPI_BUF_38 0x038
+#define IOCSR_IPI_SEND 0x040
+#define IOCSR_MAIL_SEND 0x048
+#define IOCSR_ANY_SEND 0x158
+
+int kvm_loongarch_register_ipi_device(void);
+
+#endif
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
new file mode 100644
index 000000000000..3e4b397f423f
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_KVM_PARA_H
+#define _ASM_LOONGARCH_KVM_PARA_H
+
+#include <uapi/asm/kvm_para.h>
+
+/*
+ * Hypercall code field
+ */
+#define HYPERVISOR_KVM 1
+#define HYPERVISOR_VENDOR_SHIFT 8
+#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
+
+#define KVM_HCALL_CODE_SERVICE 0
+#define KVM_HCALL_CODE_SWDBG 1
+#define KVM_HCALL_CODE_USER_SERVICE 2
+
+#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
+#define KVM_HCALL_FUNC_IPI 1
+#define KVM_HCALL_FUNC_NOTIFY 2
+
+#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
+
+#define KVM_HCALL_USER_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
+
+/*
+ * LoongArch hypercall return code
+ */
+#define KVM_HCALL_SUCCESS 0
+#define KVM_HCALL_INVALID_CODE -1UL
+#define KVM_HCALL_INVALID_PARAMETER -2UL
+
+#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
+#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
+/*
+ * Hypercall interface for KVM hypervisor
+ *
+ * a0: function identifier
+ * a1-a5: up to five arguments
+ *
+ * The return value is placed back in a0.
+ */
+static __always_inline long kvm_hypercall0(u64 fid)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall2(u64 fid,
+ unsigned long arg0, unsigned long arg1)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1), "r" (a2)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall3(u64 fid,
+ unsigned long arg0, unsigned long arg1, unsigned long arg2)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1), "r" (a2), "r" (a3)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall4(u64 fid,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+ register unsigned long a4 asm("a4") = arg3;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall5(u64 fid,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+ register unsigned long a4 asm("a4") = arg3;
+ register unsigned long a5 asm("a5") = arg4;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
+ : "memory"
+ );
+
+ return ret;
+}
+
+#ifdef CONFIG_PARAVIRT
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+#else
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+#endif
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+#endif /* _ASM_LOONGARCH_KVM_PARA_H */
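
A hedged usage sketch of the wrappers above: a guest sending a paravirtualized IPI through the service hypercall. The argument layout (CPU bitmap in a1, a zero placeholder in a2, minimum CPU id in a3) is an assumption for illustration, and the sketch assumes all targets lie within 64 CPUs of the first one.

static void pv_send_ipi(const struct cpumask *mask)
{
	int cpu, min = cpumask_first(mask);
	unsigned long bitmap = 0;

	/* pack the target CPUs into a bitmap relative to the first one */
	for_each_cpu(cpu, mask)
		bitmap |= 1UL << (cpu - min);

	kvm_hypercall3(KVM_HCALL_FUNC_IPI, bitmap, 0, min);
}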
diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h
new file mode 100644
index 000000000000..e6df6a4c1c70
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_pch_pic.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_PCH_PIC_H
+#define __ASM_KVM_PCH_PIC_H
+
+#include <kvm/iodev.h>
+
+#define PCH_PIC_SIZE 0x3e8
+
+#define PCH_PIC_INT_ID_START 0x0
+#define PCH_PIC_INT_ID_END 0x7
+#define PCH_PIC_MASK_START 0x20
+#define PCH_PIC_MASK_END 0x27
+#define PCH_PIC_HTMSI_EN_START 0x40
+#define PCH_PIC_HTMSI_EN_END 0x47
+#define PCH_PIC_EDGE_START 0x60
+#define PCH_PIC_EDGE_END 0x67
+#define PCH_PIC_CLEAR_START 0x80
+#define PCH_PIC_CLEAR_END 0x87
+#define PCH_PIC_AUTO_CTRL0_START 0xc0
+#define PCH_PIC_AUTO_CTRL0_END 0xc7
+#define PCH_PIC_AUTO_CTRL1_START 0xe0
+#define PCH_PIC_AUTO_CTRL1_END 0xe7
+#define PCH_PIC_ROUTE_ENTRY_START 0x100
+#define PCH_PIC_ROUTE_ENTRY_END 0x13f
+#define PCH_PIC_HTMSI_VEC_START 0x200
+#define PCH_PIC_HTMSI_VEC_END 0x23f
+#define PCH_PIC_INT_IRR_START 0x380
+#define PCH_PIC_INT_IRR_END 0x38f
+#define PCH_PIC_INT_ISR_START 0x3a0
+#define PCH_PIC_INT_ISR_END 0x3af
+#define PCH_PIC_POLARITY_START 0x3e0
+#define PCH_PIC_POLARITY_END 0x3e7
+#define PCH_PIC_INT_ID_VAL 0x7000000UL
+#define PCH_PIC_INT_ID_VER 0x1UL
+
+struct loongarch_pch_pic {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+ uint64_t mask; /* 1:disable irq, 0:enable irq */
+ uint64_t htmsi_en; /* 1:msi */
+ uint64_t edge; /* 1:edge triggered, 0:level triggered */
+ uint64_t auto_ctrl0; /* only use default value 00b */
+ uint64_t auto_ctrl1; /* only use default value 00b */
+ uint64_t last_intirr; /* edge detection */
+ uint64_t irr; /* interrupt request register */
+ uint64_t isr; /* interrupt service register */
+ uint64_t polarity; /* 0: high level trigger, 1: low level trigger */
+ uint8_t route_entry[64]; /* default value 0, route to int0: eiointc */
+ uint8_t htmsi_vector[64]; /* irq route table for routing to eiointc */
+ uint64_t pch_pic_base;
+};
+
+int kvm_loongarch_register_pch_pic_device(void);
+void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level);
+void pch_msi_set_irq(struct kvm *kvm, int irq, int level);
+
+#endif /* __ASM_KVM_PCH_PIC_H */
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 0cb4fdb8a9b5..f1efd7cfbc20 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -37,12 +37,13 @@
#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
typedef union loongarch_instruction larch_inst;
-typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_emu_idle(struct kvm_vcpu *vcpu);
int kvm_pending_timer(struct kvm_vcpu *vcpu);
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
@@ -75,12 +76,18 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+#endif
+
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
-void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid);
/*
* Loongarch KVM guest interrupt handling
@@ -109,4 +116,24 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
return -1;
}
+static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gprs[num];
+}
+
+static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
+{
+ vcpu->arch.gprs[num] = val;
+}
+
+static inline bool kvm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
+static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
+{
+ return vcpu->kvm->arch.pv_features & BIT(feature);
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
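
The kvm_read_reg()/kvm_write_reg() accessors added above are the natural interface for hypercall emulation on the host side. A hedged sketch with the dispatch heavily simplified:

static void emulate_service_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
	long ret = KVM_HCALL_INVALID_CODE;

	if (func == KVM_HCALL_FUNC_IPI)
		ret = KVM_HCALL_SUCCESS;	/* actual IPI delivery omitted */

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}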
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
index e671978bf552..38566574e562 100644
--- a/arch/loongarch/include/asm/lbt.h
+++ b/arch/loongarch/include/asm/lbt.h
@@ -12,9 +12,13 @@
#include <asm/loongarch.h>
#include <asm/processor.h>
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
static inline int is_lbt_enabled(void)
{
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 46366e783c84..52651aa0e583 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -62,6 +62,7 @@
#define LOONGARCH_CPUCFG1 0x1
#define CPUCFG1_ISGR32 BIT(0)
#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_ISA GENMASK(1, 0)
#define CPUCFG1_PAGING BIT(2)
#define CPUCFG1_IOCSR BIT(3)
#define CPUCFG1_PABITS GENMASK(11, 4)
@@ -107,6 +108,12 @@
#define CPUCFG3_SPW_HG_HF BIT(11)
#define CPUCFG3_RVA BIT(12)
#define CPUCFG3_RVAMAX GENMASK(16, 13)
+#define CPUCFG3_ALDORDER_CAP BIT(18) /* All address load ordered, capability */
+#define CPUCFG3_ASTORDER_CAP BIT(19) /* All address store ordered, capability */
+#define CPUCFG3_ALDORDER_STA BIT(20) /* All address load ordered, status */
+#define CPUCFG3_ASTORDER_STA BIT(21) /* All address store ordered, status */
+#define CPUCFG3_SLDORDER_CAP BIT(22) /* Same address load ordered, capability */
+#define CPUCFG3_SLDORDER_STA BIT(23) /* Same address load ordered, status */
#define LOONGARCH_CPUCFG4 0x4
#define CPUCFG4_CCFREQ GENMASK(31, 0)
@@ -119,6 +126,7 @@
#define CPUCFG6_PMP BIT(0)
#define CPUCFG6_PAMVER GENMASK(3, 1)
#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
#define CPUCFG6_UPM BIT(14)
@@ -158,6 +166,11 @@
#define CPUCFG48_VFPU_CG BIT(2)
#define CPUCFG48_RAM_CG BIT(3)
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for the KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h
+ */
+
#ifndef __ASSEMBLY__
/* CSR */
@@ -240,10 +253,10 @@
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
#define CSR_ESTAT_IS_SHIFT 0
-#define CSR_ESTAT_IS_WIDTH 14
-#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
-#define LOONGARCH_CSR_ERA 0x6 /* ERA */
+#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
@@ -459,7 +472,6 @@
#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */
#define CSR_TCFG_VAL_SHIFT 2
-#define CSR_TCFG_VAL_WIDTH 48
#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
#define CSR_TCFG_PERIOD_SHIFT 1
#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
@@ -559,6 +571,15 @@
/* Implement dependent */
#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */
+#define CSR_LDSTORDER_SHIFT 28
+#define CSR_LDSTORDER_WIDTH 3
+#define CSR_LDSTORDER_MASK (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT)
+#define CSR_LDSTORDER_NLD_NST (_ULCAST_(0x0) << CSR_LDSTORDER_SHIFT) /* 000 = No Load No Store */
+#define CSR_LDSTORDER_ALD_NST (_ULCAST_(0x1) << CSR_LDSTORDER_SHIFT) /* 001 = All Load No Store */
+#define CSR_LDSTORDER_SLD_NST (_ULCAST_(0x3) << CSR_LDSTORDER_SHIFT) /* 011 = Same Load No Store */
+#define CSR_LDSTORDER_NLD_AST (_ULCAST_(0x4) << CSR_LDSTORDER_SHIFT) /* 100 = No Load All Store */
+#define CSR_LDSTORDER_ALD_AST (_ULCAST_(0x5) << CSR_LDSTORDER_SHIFT) /* 101 = All Load All Store */
+#define CSR_LDSTORDER_SLD_AST (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT) /* 111 = Same Load All Store */
#define CSR_MISPEC_SHIFT 20
#define CSR_MISPEC_WIDTH 8
#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
@@ -636,6 +657,13 @@
#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+#define LOONGARCH_CSR_ISR0 0xa0
+#define LOONGARCH_CSR_ISR1 0xa1
+#define LOONGARCH_CSR_ISR2 0xa2
+#define LOONGARCH_CSR_ISR3 0xa3
+
+#define LOONGARCH_CSR_IRR 0xa4
+
#define LOONGARCH_CSR_PRID 0xc0
/* Shadow MCSR : 0xc0 ~ 0xff */
@@ -865,7 +893,7 @@
#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
-/* Direct Map window 0/1 */
+/* Direct Map window 0/1/2/3 */
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -877,6 +905,14 @@
#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+#define CSR_DMW2_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW2_MAT _CONST64_(2 << 4)
+#define CSR_DMW2_VSEG _CONST64_(0xa000)
+#define CSR_DMW2_BASE (CSR_DMW2_VSEG << DMW_PABITS)
+#define CSR_DMW2_INIT (CSR_DMW2_BASE | CSR_DMW2_MAT | CSR_DMW2_PLV0)
+
+#define CSR_DMW3_INIT 0x0
+
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -937,6 +973,36 @@
#define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */
#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */
+#define LOONGARCH_CSR_DB8ADDR 0x350 /* data breakpoint 8 address */
+#define LOONGARCH_CSR_DB8MASK 0x351 /* data breakpoint 8 mask */
+#define LOONGARCH_CSR_DB8CTRL 0x352 /* data breakpoint 8 control */
+#define LOONGARCH_CSR_DB8ASID 0x353 /* data breakpoint 8 asid */
+
+#define LOONGARCH_CSR_DB9ADDR 0x358 /* data breakpoint 9 address */
+#define LOONGARCH_CSR_DB9MASK 0x359 /* data breakpoint 9 mask */
+#define LOONGARCH_CSR_DB9CTRL 0x35a /* data breakpoint 9 control */
+#define LOONGARCH_CSR_DB9ASID 0x35b /* data breakpoint 9 asid */
+
+#define LOONGARCH_CSR_DB10ADDR 0x360 /* data breakpoint 10 address */
+#define LOONGARCH_CSR_DB10MASK 0x361 /* data breakpoint 10 mask */
+#define LOONGARCH_CSR_DB10CTRL 0x362 /* data breakpoint 10 control */
+#define LOONGARCH_CSR_DB10ASID 0x363 /* data breakpoint 10 asid */
+
+#define LOONGARCH_CSR_DB11ADDR 0x368 /* data breakpoint 11 address */
+#define LOONGARCH_CSR_DB11MASK 0x369 /* data breakpoint 11 mask */
+#define LOONGARCH_CSR_DB11CTRL 0x36a /* data breakpoint 11 control */
+#define LOONGARCH_CSR_DB11ASID 0x36b /* data breakpoint 11 asid */
+
+#define LOONGARCH_CSR_DB12ADDR 0x370 /* data breakpoint 12 address */
+#define LOONGARCH_CSR_DB12MASK 0x371 /* data breakpoint 12 mask */
+#define LOONGARCH_CSR_DB12CTRL 0x372 /* data breakpoint 12 control */
+#define LOONGARCH_CSR_DB12ASID 0x373 /* data breakpoint 12 asid */
+
+#define LOONGARCH_CSR_DB13ADDR 0x378 /* data breakpoint 13 address */
+#define LOONGARCH_CSR_DB13MASK 0x379 /* data breakpoint 13 mask */
+#define LOONGARCH_CSR_DB13CTRL 0x37a /* data breakpoint 13 control */
+#define LOONGARCH_CSR_DB13ASID 0x37b /* data breakpoint 13 asid */
+
#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
@@ -980,6 +1046,36 @@
#define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */
#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+#define LOONGARCH_CSR_IB8ADDR 0x3d0 /* inst breakpoint 8 address */
+#define LOONGARCH_CSR_IB8MASK 0x3d1 /* inst breakpoint 8 mask */
+#define LOONGARCH_CSR_IB8CTRL 0x3d2 /* inst breakpoint 8 control */
+#define LOONGARCH_CSR_IB8ASID 0x3d3 /* inst breakpoint 8 asid */
+
+#define LOONGARCH_CSR_IB9ADDR 0x3d8 /* inst breakpoint 9 address */
+#define LOONGARCH_CSR_IB9MASK 0x3d9 /* inst breakpoint 9 mask */
+#define LOONGARCH_CSR_IB9CTRL 0x3da /* inst breakpoint 9 control */
+#define LOONGARCH_CSR_IB9ASID 0x3db /* inst breakpoint 9 asid */
+
+#define LOONGARCH_CSR_IB10ADDR 0x3e0 /* inst breakpoint 10 address */
+#define LOONGARCH_CSR_IB10MASK 0x3e1 /* inst breakpoint 10 mask */
+#define LOONGARCH_CSR_IB10CTRL 0x3e2 /* inst breakpoint 10 control */
+#define LOONGARCH_CSR_IB10ASID 0x3e3 /* inst breakpoint 10 asid */
+
+#define LOONGARCH_CSR_IB11ADDR 0x3e8 /* inst breakpoint 11 address */
+#define LOONGARCH_CSR_IB11MASK 0x3e9 /* inst breakpoint 11 mask */
+#define LOONGARCH_CSR_IB11CTRL 0x3ea /* inst breakpoint 11 control */
+#define LOONGARCH_CSR_IB11ASID 0x3eb /* inst breakpoint 11 asid */
+
+#define LOONGARCH_CSR_IB12ADDR 0x3f0 /* inst breakpoint 12 address */
+#define LOONGARCH_CSR_IB12MASK 0x3f1 /* inst breakpoint 12 mask */
+#define LOONGARCH_CSR_IB12CTRL 0x3f2 /* inst breakpoint 12 control */
+#define LOONGARCH_CSR_IB12ASID 0x3f3 /* inst breakpoint 12 asid */
+
+#define LOONGARCH_CSR_IB13ADDR 0x3f8 /* inst breakpoint 13 address */
+#define LOONGARCH_CSR_IB13MASK 0x3f9 /* inst breakpoint 13 mask */
+#define LOONGARCH_CSR_IB13CTRL 0x3fa /* inst breakpoint 13 control */
+#define LOONGARCH_CSR_IB13ASID 0x3fb /* inst breakpoint 13 asid */
+
#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
#define LOONGARCH_CSR_DERA 0x501 /* debug era */
#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
@@ -990,7 +1086,7 @@
/*
* CSR_ECFG IM
*/
-#define ECFG0_IM 0x00001fff
+#define ECFG0_IM 0x00005fff
#define ECFGB_SIP0 0
#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1 1
@@ -1033,6 +1129,7 @@
#define IOCSRF_EIODECODE BIT_ULL(9)
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
+#define IOCSRF_AVEC BIT_ULL(15)
#define LOONGARCH_IOCSR_VENDOR 0x10
@@ -1041,11 +1138,15 @@
#define LOONGARCH_IOCSR_NODECNT 0x408
#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
+#define LOONGARCH_IOCSR_SMCMBX 0x51c
+
/* PerCore CSR, only accessible by local cores */
#define LOONGARCH_IOCSR_IPI_STATUS 0x1000
#define LOONGARCH_IOCSR_IPI_EN 0x1004
@@ -1363,9 +1464,10 @@ __BUILD_CSR_OP(tlbidx)
#define INT_TI 11 /* Timer */
#define INT_IPI 12
#define INT_NMI 13
+#define INT_AVEC 14
/* ExcCodes corresponding to interrupts */
-#define EXCCODE_INT_NUM (INT_NMI + 1)
+#define EXCCODE_INT_NUM (INT_AVEC + 1)
#define EXCCODE_INT_START 64
#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
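
A hedged sketch of how early boot code could program the two extra direct-map windows defined above, in the same way windows 0 and 1 are initialized (illustrative only):

static void __init setup_extra_dmw(void)
{
	/* window 2: write-combine segment at the 0xa000 VSEG; window 3: left disabled */
	csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
	csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
}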
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
index 9f97c3453b9c..304363bd3935 100644
--- a/arch/loongarch/include/asm/mmu_context.h
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/* Normal, classic get_new_mmu_context */
static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
{
u64 asid = asid_cache(cpu);
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
- local_flush_tlb_user(); /* start new asid cycle */
+ *need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
+{
+ __asm__ __volatile__(
+ "csrwr %[pgdl_val], %[pgdl_reg] \n\t"
+ "csrwr %[asid_val], %[asid_reg] \n\t"
+ : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
+ : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
+ : "memory"
+ );
+}
+
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ bool need_flush = false;
unsigned int cpu = smp_processor_id();
/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
- get_new_mmu_context(next, cpu);
-
- write_csr_asid(cpu_asid(cpu, next));
+ get_new_mmu_context(next, cpu, &need_flush);
if (next != &init_mm)
- csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
else
- csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
+
+ if (need_flush)
+ local_flush_tlb_user(); /* Flush TLB after updating the ASID */
/*
* Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
if (asid == cpu_asid(cpu, mm)) {
+ bool need_flush = false;
+
if (!current->mm || (current->mm == mm)) {
- get_new_mmu_context(mm, cpu);
+ get_new_mmu_context(mm, cpu, &need_flush);
+
write_csr_asid(cpu_asid(cpu, mm));
+ if (need_flush)
+ local_flush_tlb_user(); /* Flush TLB after updating the ASID */
+
goto out;
}
}
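
The mmu_context.h change above defers the TLB flush: get_new_mmu_context() now only reports, via an out-parameter, that the ASID counter wrapped, and the caller flushes after the new ASID/PGD pair has been installed. A minimal user-space sketch of that pattern (the 8-bit ASID mask and all names below are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Bump a hypothetical 8-bit ASID; report when a flush is required. */
static void get_new_context(unsigned long *asid, bool *need_flush)
{
	if (!(++(*asid) & 0xff))
		*need_flush = true;	/* counter wrapped: new ASID cycle */
}

int main(void)
{
	bool need_flush = false;
	unsigned long asid = 0xff;	/* about to wrap */

	get_new_context(&asid, &need_flush);
	/* ... install the new ASID and page-table root first ... */
	if (need_flush)
		printf("flush the TLB only after the ASID update\n");
	return 0;
}
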
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
deleted file mode 100644
index 2b9a90727e19..000000000000
--- a/arch/loongarch/include/asm/mmzone.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Author: Huacai Chen (chenhuacai@loongson.cn)
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/page.h>
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index 27f319b49862..b5f9de9f102e 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void set_cpuid_to_node(int cpuid, s16 node) { }
static inline int early_cpu_to_node(int cpu)
{
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index e85df33f11c7..7368f12b7cb1 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -8,12 +8,7 @@
#include <linux/const.h>
#include <asm/addrspace.h>
-/*
- * PAGE_SHIFT determines the page size
- */
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#include <vdso/page.h>
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -81,9 +76,6 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
#define pfn_to_phys(pfn) __pfn_to_phys(pfn)
#define phys_to_pfn(paddr) __phys_to_pfn(paddr)
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
-#define phys_to_page(paddr) pfn_to_page(phys_to_pfn(paddr))
-
#ifndef CONFIG_KFENCE
#define page_to_virt(page) __va(page_to_phys(page))
@@ -113,10 +105,7 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
new file mode 100644
index 000000000000..3f4323603e6a
--- /dev/null
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_PARAVIRT_H
+#define _ASM_LOONGARCH_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+
+#include <linux/static_call_types.h>
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 dummy_steal_clock(int cpu);
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+
+int __init pv_ipi_init(void);
+int __init pv_time_init(void);
+int __init pv_spinlock_init(void);
+
+#else
+
+static inline int pv_ipi_init(void)
+{
+ return 0;
+}
+
+static inline int pv_time_init(void)
+{
+ return 0;
+}
+
+static inline int pv_spinlock_init(void)
+{
+ return 0;
+}
+
+#endif // CONFIG_PARAVIRT
+#endif
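
For reference, paravirt_steal_clock() above goes through a static call, so the non-paravirt case collapses to a direct branch at runtime. A rough user-space analogue, with an ordinary function pointer standing in for static_call() (names are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t dummy_steal_clock(int cpu)
{
	(void)cpu;
	return 0;	/* bare metal: no stolen time reported */
}

/* A plain function pointer models the pv_steal_clock static call. */
static uint64_t (*pv_steal_clock)(int cpu) = dummy_steal_clock;

static uint64_t steal_clock(int cpu)
{
	return pv_steal_clock(cpu);
}

int main(void)
{
	printf("stolen time on cpu0: %llu ns\n",
	       (unsigned long long)steal_clock(0));
	return 0;
}
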
diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..65ac7cee0dad
--- /dev/null
+++ b/arch/loongarch/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 8f290e5546cf..87be9b14e9da 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -68,75 +68,6 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
-{
- unsigned long ret;
-
- switch (size) {
- case 1:
- __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
-}
-
-static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- default:
- BUILD_BUG();
- }
-}
-
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
@@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return 0;
}
+#define __pcpu_op_1(op) op ".b "
+#define __pcpu_op_2(op) op ".h "
+#define __pcpu_op_4(op) op ".w "
+#define __pcpu_op_8(op) op ".d "
+
+#define _percpu_read(size, _pcp) \
+({ \
+ typeof(_pcp) __pcp_ret; \
+ \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \
+ : [ret] "=&r"(__pcp_ret) \
+ : [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+ \
+ __pcp_ret; \
+})
+
+#define _percpu_write(size, _pcp, _val) \
+do { \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \
+ : \
+ : [val] "r"(_val), [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+} while (0)
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
__ret; \
})
-#define _percpu_read(pcp) \
-({ \
- typeof(pcp) __retval; \
- __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
- __retval; \
-})
-
-#define _percpu_write(pcp, val) \
-do { \
- __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
-} while (0) \
-
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
@@ -215,15 +161,15 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_read_1(pcp) _percpu_read(pcp)
-#define this_cpu_read_2(pcp) _percpu_read(pcp)
-#define this_cpu_read_4(pcp) _percpu_read(pcp)
-#define this_cpu_read_8(pcp) _percpu_read(pcp)
+#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
+#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
+#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
+#define this_cpu_read_8(pcp) _percpu_read(8, pcp)
-#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
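
The reworked _percpu_read()/_percpu_write() macros above pick the ldx/stx width suffix by pasting the access size into a helper macro name and letting string-literal concatenation assemble the mnemonic. A stand-alone sketch of just that preprocessor trick (no inline asm, purely illustrative):

#include <stdio.h>

#define __op_1(op)	op ".b "
#define __op_2(op)	op ".h "
#define __op_4(op)	op ".w "
#define __op_8(op)	op ".d "

/* Paste the size into the helper name, then concatenate the literals. */
#define MNEMONIC(size, op)	__op_##size(op)

int main(void)
{
	printf("%s\n", MNEMONIC(4, "ldx"));	/* prints "ldx.w " */
	printf("%s\n", MNEMONIC(8, "stx"));	/* prints "stx.d " */
	return 0;
}
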
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 52b638059e40..f948a0676daf 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -13,8 +13,7 @@
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->csr_era = (__ip); \
- (regs)->regs[3] = current_stack_pointer; \
- (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+ (regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
}
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 4e2d6b7ca2ee..1c63a9d9a6d3 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -10,6 +10,7 @@
#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -44,11 +45,18 @@ extern void pagetable_init(void);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte = __pte_alloc_one_kernel(mm);
+
+ if (pte)
+ kernel_pte_init(pte);
+
+ return pte;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -61,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 21319c1e045c..45bfc65a0c9f 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,6 +22,7 @@
#define _PAGE_PFN_SHIFT 12
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
+#define _PAGE_DEVMAP_SHIFT 59
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
@@ -35,6 +36,7 @@
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -74,8 +76,8 @@
#define __READABLE (_PAGE_VALID)
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
_PAGE_USER | _CACHE_CC)
@@ -94,6 +96,13 @@
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)
+#define pgprot_nx pgprot_nx
+
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) | _PAGE_NO_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index af3acdf3481a..a3f17914dbab 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#define ptep_get(ptep) READ_ONCE(*(ptep))
+#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
+
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
-static inline void p4d_clear(p4d_t *p4dp)
-{
- p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
-}
-
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)p4d_val(p4d);
@@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
- *p4d = p4dval;
+ WRITE_ONCE(*p4d, p4dval);
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
@@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
return pud_val(pud) != (unsigned long)invalid_pmd_table;
}
-static inline void pud_clear(pud_t *pudp)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
+ return (pmd_t *)pud_val(pud);
}
-static inline pmd_t *pud_pgtable(pud_t pud)
+static inline void set_pud(pud_t *pud, pud_t pudval)
{
- return (pmd_t *)pud_val(pud);
+ WRITE_ONCE(*pud, pudval);
}
-#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
+}
#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
-static inline void pmd_clear(pmd_t *pmdp)
+static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
- pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+ WRITE_ONCE(*pmd, pmdval);
}
-#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
+}
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
@@ -246,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -259,7 +267,11 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm
*/
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
+#define pud_init pud_init
extern void pmd_init(void *addr);
+#define pmd_init pmd_init
+extern void kernel_pte_init(void *addr);
+#define kernel_pte_init kernel_pte_init
/*
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
@@ -314,46 +326,19 @@ extern void paging_init(void);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- *ptep = pteval;
- if (pte_val(pteval) & _PAGE_GLOBAL) {
- pte_t *buddy = ptep_buddy(ptep);
- /*
- * Make sure the buddy is global too (if it's !none,
- * it better already be global)
- */
+ WRITE_ONCE(*ptep, pteval);
+
#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- __WEAK_LLSC_MB
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
-#else /* !CONFIG_SMP */
- if (pte_none(*buddy))
- pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-#endif /* CONFIG_SMP */
- }
+ if (pte_val(pteval) & _PAGE_GLOBAL)
+ DBAR(0b11000); /* o_wrw = 0b11000 */
+#endif
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- /* Preserve global status for the pair */
- if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
- set_pte(ptep, __pte(_PAGE_GLOBAL));
- else
- set_pte(ptep, __pte(0));
+ pte_t pte = ptep_get(ptep);
+ pte_val(pte) &= _PAGE_GLOBAL;
+ set_pte(ptep, pte);
}
#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
@@ -424,6 +409,9 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); }
+static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
+
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
@@ -437,12 +425,6 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
return false;
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
@@ -467,8 +449,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -558,6 +540,17 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pmd;
}
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_DEVMAP);
+}
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_DEVMAP;
+ return pmd;
+}
+
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
@@ -589,7 +582,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- pmd_t old = *pmdp;
+ pmd_t old = pmdp_get(pmdp);
pmd_clear(pmdp);
@@ -613,6 +606,11 @@ static inline long pmd_protnone(pmd_t pmd)
#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pud_devmap(pud) (0)
+#define pgd_devmap(pgd) (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
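
Two behavioural points in the pgtable.h hunk above: set_pte() now issues a DBAR hint for global entries instead of propagating _PAGE_GLOBAL into the buddy PTE, and pte_clear() simply masks the old value down to its global bit, so a cleared global mapping stays marked global. A trivial sketch of that masking step (the bit position is a stand-in, not the real _PAGE_GLOBAL_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_GLOBAL	(UINT64_C(1) << 6)	/* illustrative bit */

int main(void)
{
	uint64_t pte = UINT64_C(0x9000000012345000) | DEMO_PAGE_GLOBAL | 0x3;

	pte &= DEMO_PAGE_GLOBAL;	/* what the new pte_clear() keeps */
	printf("cleared pte: %#llx\n", (unsigned long long)pte);
	return 0;
}
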
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index f3ddaed9ef7f..e5d21e836d99 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
- return arch_irqs_disabled_flags(regs->csr_prmd);
+ return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..e76d3aa1e1eb
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <linux/jump_label.h>
+
+#ifdef CONFIG_PARAVIRT
+
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ int val;
+
+ if (!static_branch_unlikely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+__retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
+
+ return true;
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
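
virt_spin_lock() above falls back to a test-and-set loop when the hypervisor offers no paravirt spinlock support, trading fairness for resilience against lock-holder preemption. A self-contained C11 sketch of the same structure, using <stdatomic.h> in place of the kernel's atomic_try_cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>

/* 0 = unlocked, 1 = locked (mirrors _Q_LOCKED_VAL). */
static bool tas_spin_lock(atomic_int *lock)
{
	int val;

	for (;;) {
		val = atomic_load_explicit(lock, memory_order_relaxed);
		if (val == 0 &&
		    atomic_compare_exchange_weak(lock, &val, 1))
			return true;	/* acquired */
		/* held, or the CAS raced: keep spinning like __retry */
	}
}

int main(void)
{
	atomic_int lock = 0;

	tas_spin_lock(&lock);	/* uncontended: acquires immediately */
	atomic_store(&lock, 0);	/* unlock */
	return 0;
}
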
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
new file mode 100644
index 000000000000..55dfaefd02c8
--- /dev/null
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_SET_MEMORY_H
+#define _ASM_LOONGARCH_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+
+bool kernel_page_present(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+
+#endif /* _ASM_LOONGARCH_SET_MEMORY_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index ee52fb1e9963..3c2fb16b11b6 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -34,6 +34,11 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
+#ifdef CONFIG_RELR
+extern long __relr_dyn_begin;
+extern long __relr_dyn_end;
+#endif
+
extern unsigned long __init relocate_kernel(void);
#endif
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index f81e5f01d619..b87d1d5e5890 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -6,12 +6,21 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#ifdef CONFIG_SMP
+
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+struct smp_ops {
+ void (*init_ipi)(void);
+ void (*send_ipi_single)(int cpu, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
+};
+extern struct smp_ops mp_ops;
+
extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
@@ -24,8 +33,6 @@ void loongson_prepare_cpus(unsigned int max_cpus);
void loongson_boot_secondary(int cpu, struct task_struct *idle);
void loongson_init_secondary(void);
void loongson_smp_finish(void);
-void loongson_send_ipi_single(int cpu, unsigned int action);
-void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action);
#ifdef CONFIG_HOTPLUG_CPU
int loongson_cpu_disable(void);
void loongson_cpu_die(unsigned int cpu);
@@ -59,9 +66,18 @@ extern int __cpu_logical_map[NR_CPUS];
#define cpu_physical_id(cpu) cpu_logical_map(cpu)
-#define SMP_BOOT_CPU 0x1
-#define SMP_RESCHEDULE 0x2
-#define SMP_CALL_FUNCTION 0x4
+#define ACTION_BOOT_CPU 0
+#define ACTION_RESCHEDULE 1
+#define ACTION_CALL_FUNCTION 2
+#define ACTION_IRQ_WORK 3
+#define ACTION_CLEAR_VECTOR 4
+#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
+#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
+#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
+#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
+#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
+
+struct seq_file;
struct secondary_data {
unsigned long stack;
@@ -81,12 +97,12 @@ extern void show_ipi_list(struct seq_file *p, int prec);
static inline void arch_send_call_function_single_ipi(int cpu)
{
- loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+ mp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+ mp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,4 +117,8 @@ static inline void __cpu_die(unsigned int cpu)
}
#endif
+#else /* !CONFIG_SMP */
+#define cpu_logical_map(cpu) 0
+#endif /* CONFIG_SMP */
+
#endif /* __ASM_SMP_H */
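
The smp.h hunk replaces the hard-coded SMP_* masks with per-action indices and derives the masks with BIT(), which is what lets mp_ops.send_ipi_single() take a plain action number. A small stand-alone check of that mapping (re-declared here only for illustration):

#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define ACTION_RESCHEDULE	1
#define ACTION_CALL_FUNCTION	2
#define ACTION_IRQ_WORK		3

#define SMP_RESCHEDULE		BIT(ACTION_RESCHEDULE)
#define SMP_CALL_FUNCTION	BIT(ACTION_CALL_FUNCTION)
#define SMP_IRQ_WORK		BIT(ACTION_IRQ_WORK)

int main(void)
{
	/* Single-target senders pass the action index; mask-based code
	 * still sees the historical one-bit-per-action encoding. */
	printf("SMP_RESCHEDULE    = %#lx\n", SMP_RESCHEDULE);	/* 0x2 */
	printf("SMP_CALL_FUNCTION = %#lx\n", SMP_CALL_FUNCTION);	/* 0x4 */
	printf("SMP_IRQ_WORK      = %#lx\n", SMP_IRQ_WORK);	/* 0x8 */
	return 0;
}
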
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 45b507a7b06f..66736837085b 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -38,11 +38,22 @@
cfi_restore \reg \offset \docfi
.endm
+ .macro SETUP_DMWINS temp
+ li.d \temp, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN0
+ li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN1
+ li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN2
+ li.d \temp, CSR_DMW3_INIT # 0x0, unused
+ csrwr \temp, LOONGARCH_CSR_DMWIN3
+ .endm
+
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
- or \temp1, \temp1, \temp2
+ bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..fc8b64773794 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
index e286dc58476e..81d2733f7b94 100644
--- a/arch/loongarch/include/asm/syscall.h
+++ b/arch/loongarch/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->regs[11];
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->regs[11] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ memcpy(&regs->regs[5], &args[1], 5 * sizeof(long));
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_LOONGARCH64;
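
syscall_set_arguments() above mirrors syscall_get_arguments(): argument 0 is kept in the dedicated orig_a0 slot while arguments 1-5 occupy regs[5]..regs[9] (a1..a5). A stand-alone sketch of that copy, with a stand-in struct rather than the kernel's pt_regs:

#include <stdio.h>
#include <string.h>

struct demo_regs {
	unsigned long regs[32];	/* r0..r31; a1..a5 live in regs[5..9] */
	unsigned long orig_a0;
};

int main(void)
{
	struct demo_regs r = { { 0 }, 0 };
	unsigned long args[6] = { 10, 11, 12, 13, 14, 15 };

	r.orig_a0 = args[0];
	memcpy(&r.regs[5], &args[1], 5 * sizeof(unsigned long));

	printf("a0=%lu a1=%lu a5=%lu\n", r.orig_a0, r.regs[5], r.regs[9]);
	return 0;
}
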
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 8bf0e6f51546..4f5a9441754e 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -66,8 +66,9 @@ register unsigned long current_stack_pointer __asm__("$sp");
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_NEED_RESCHED 0 /* rescheduling necessary */
+#define TIF_NEED_RESCHED_LAZY 1 /* lazy rescheduling necessary */
+#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
@@ -88,8 +89,9 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
#define TIF_PATCH_PENDING 21 /* pending live patching update */
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
#define _TIF_NOHZ (1<<TIF_NOHZ)
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 66128dec0bf6..50273c9187d0 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
extern cpumask_t cpus_on_node[];
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
index cfddb0116a8c..e2c0f3d86c7b 100644
--- a/arch/loongarch/include/asm/unistd.h
+++ b/arch/loongarch/include/asm/unistd.h
@@ -8,4 +8,7 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..2c68bc72736c 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
index c8f59983f702..025fc3f0a102 100644
--- a/arch/loongarch/include/asm/uprobes.h
+++ b/arch/loongarch/include/asm/uprobes.h
@@ -9,13 +9,12 @@ typedef u32 uprobe_opcode_t;
#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
-#define UPROBE_SWBP_INSN larch_insn_gen_break(BRK_UPROBE_BP)
+#define UPROBE_SWBP_INSN __emit_break(BRK_UPROBE_BP)
#define UPROBE_SWBP_INSN_SIZE LOONGARCH_INSN_SIZE
-#define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP)
+#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
- unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
index d3ba35eb23e7..f72ec79e2dde 100644
--- a/arch/loongarch/include/asm/vdso.h
+++ b/arch/loongarch/include/asm/vdso.h
@@ -31,7 +31,6 @@ struct loongarch_vdso_info {
unsigned long size;
unsigned long offset_sigreturn;
struct vm_special_mapping code_mapping;
- struct vm_special_mapping data_mapping;
};
extern struct loongarch_vdso_info vdso_info;
diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h
new file mode 100644
index 000000000000..322d0a5f1c84
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/arch_data.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _VDSO_ARCH_DATA_H
+#define _VDSO_ARCH_DATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/vdso.h>
+
+struct vdso_pcpu_data {
+ u32 node;
+} ____cacheline_aligned_in_smp;
+
+struct vdso_arch_data {
+ struct vdso_pcpu_data pdata[NR_CPUS];
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..48c43f55b039
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
+{
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_getrandom;
+ register void *buffer asm("a0") = _buffer;
+ register size_t len asm("a1") = _len;
+ register unsigned int flags asm("a2") = _flags;
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (buffer), "r" (len), "r" (flags)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+
+ return ret;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
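
getrandom_syscall() above is the vDSO fallback: arguments are pinned to a0-a2 and the syscall number to a7 before "syscall 0". From regular user space, the equivalent request via glibc's generic syscall() wrapper looks like this (illustrative only, not vDSO code):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	long ret = syscall(SYS_getrandom, buf, sizeof(buf), 0);

	printf("getrandom returned %ld\n", ret);
	return ret == (long)sizeof(buf) ? 0 : 1;
}
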
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index 89e6b222c2f2..88cfcf133116 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -72,7 +72,7 @@ static __always_inline int clock_getres_fallback(
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
uint64_t count;
@@ -89,18 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return (const struct vdso_data *)get_vdso_data();
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return (const struct vdso_data *)(get_vdso_data() + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE);
-}
-#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 5a12309d9fb5..50c65fb29daf 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -4,62 +4,18 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#ifndef _ASM_VDSO_VDSO_H
+#define _ASM_VDSO_VDSO_H
+
#ifndef __ASSEMBLY__
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
-struct vdso_pcpu_data {
- u32 node;
-} ____cacheline_aligned_in_smp;
-
-struct loongarch_vdso_data {
- struct vdso_pcpu_data pdata[NR_CPUS];
-};
-
-/*
- * The layout of vvar:
- *
- * high
- * +---------------------+--------------------------+
- * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE |
- * +---------------------+--------------------------+
- * | time-ns vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * | generic vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * low
- */
-#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
-#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT)
-
-enum vvar_pages {
- VVAR_GENERIC_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_LOONGARCH_PAGES_START,
- VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1,
- VVAR_NR_PAGES,
-};
-
-#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-
-static inline unsigned long get_vdso_base(void)
-{
- unsigned long addr;
-
- __asm__(
- " la.pcrel %0, _start\n"
- : "=r" (addr)
- :
- :);
-
- return addr;
-}
-
-static inline unsigned long get_vdso_data(void)
-{
- return get_vdso_base() - VVAR_SIZE;
-}
+#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 5de615383a22..1140b54b4bc8 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -4,21 +4,8 @@
#ifndef __ASSEMBLY__
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-static __always_inline
-struct vdso_data *__loongarch_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/video.h
index 0b218b10a9ec..9f76845f2d4f 100644
--- a/arch/loongarch/include/asm/fb.h
+++ b/arch/loongarch/include/asm/video.h
@@ -2,8 +2,8 @@
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
#include <linux/compiler.h>
#include <linux/string.h>
@@ -26,6 +26,6 @@ static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
}
#define fb_memset fb_memset_io
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _ASM_FB_H_ */
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 4aa680ca2e5f..517761419999 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += kvm_para.h
+syscall-y += unistd_64.h
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
index 6955a7cb2c65..2b34e56cfa9e 100644
--- a/arch/loongarch/include/uapi/asm/hwcap.h
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -17,5 +17,6 @@
#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
+#define HWCAP_LOONGARCH_LSPW (1 << 14)
#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 109785922cf9..5f354f5c6847 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define __KVM_HAVE_IRQ_LINE
+
/*
* KVM LoongArch specific structures and definitions.
*
@@ -17,6 +19,8 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+
/*
* for KVM_GET_REGS and KVM_SET_REGS
*/
@@ -62,6 +66,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
@@ -72,12 +77,37 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
+/* Debugging: Special instruction for software breakpoint */
+#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+
+/* LBT registers */
+#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vm fd */
+#define KVM_LOONGARCH_VM_FEAT_CTRL 0
+#define KVM_LOONGARCH_VM_FEAT_LSX 0
+#define KVM_LOONGARCH_VM_FEAT_LASX 1
+#define KVM_LOONGARCH_VM_FEAT_X86BT 2
+#define KVM_LOONGARCH_VM_FEAT_ARMBT 3
+#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4
+#define KVM_LOONGARCH_VM_FEAT_PMU 5
+#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
+#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
+
+/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
+#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
struct kvm_debug_exit_arch {
};
@@ -104,4 +134,22 @@ struct kvm_iocsr_entry {
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
+#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000001
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000002
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000003
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000004
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3
+
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000006
+#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0
+
#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000000..76d802ef01ce
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_KVM_PARA_H
+#define _UAPI_ASM_KVM_PARA_H
+
+#include <linux/types.h>
+
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for the KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE 0x40000000
+#define CPUCFG_KVM_SIZE 0x100
+#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
+#define KVM_SIGNATURE "KVM\0"
+#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_IPI 1
+#define KVM_FEATURE_STEAL_TIME 2
+/* Bits 24 - 31 are features configurable by the user-space VMM */
+#define KVM_FEATURE_VIRT_EXTIOI 24
+#define KVM_FEATURE_USER_HCALL 25
+
+#endif /* _UAPI_ASM_KVM_PARA_H */
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index ac915f841650..aafb3cd9e943 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -72,6 +72,16 @@ struct user_watch_state {
} dbg_regs[8];
};
+struct user_watch_state_v2 {
+ uint64_t dbg_info;
+ struct {
+ uint64_t addr;
+ uint64_t mask;
+ uint32_t ctrl;
+ uint32_t pad;
+ } dbg_regs[14];
+};
+
#define PTRACE_SYSEMU 0x1f
#define PTRACE_SYSEMU_SINGLESTEP 0x20
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 6c22f616b8f1..5cd121275bac 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -9,7 +9,6 @@
#define _UAPI_ASM_SIGCONTEXT_H
#include <linux/types.h>
-#include <linux/posix_types.h>
/* FP context was used */
#define SC_USED_FP (1 << 0)
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index fcb668984f03..1f01980f9c94 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,5 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 3a7620b66bc6..f9dcaa60033d 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -8,9 +8,9 @@ OBJECT_FILES_NON_STANDARD_head.o := y
extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
- traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \
elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
- alternative.o unwind.o
+ alternative.o kdebugfs.o unwind.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_EFI) += efi.o
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
@@ -51,6 +51,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..ab7d9baa2915
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# No special ABIs on loongarch so far
+syscall_abis_64 +=
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 5cf59c617126..1120ac2824f6 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/efi-bgrt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -58,38 +59,47 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
}
#ifdef CONFIG_SMP
-static int set_processor_mask(u32 id, u32 flags)
+static int set_processor_mask(u32 id, u32 pass)
{
+ int cpu = -1, cpuid = id;
- int cpu, cpuid = id;
-
- if (num_processors >= nr_cpu_ids) {
- pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
- " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+ if (num_processors >= NR_CPUS) {
+ pr_warn(PREFIX "nr_cpus limit of %i reached."
+ " processor 0x%x ignored.\n", NR_CPUS, cpuid);
return -ENODEV;
}
+
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- else
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- if (flags & ACPI_MADT_ENABLED) {
+ switch (pass) {
+ case 1: /* Pass 1: handle enabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
- set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
- __cpu_number_map[cpuid] = cpu;
- __cpu_logical_map[cpu] = cpuid;
- } else
+ break;
+ case 2: /* Pass 2: handle disabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
disabled_cpus++;
+ break;
+ default:
+ return cpu;
+ }
+
+ set_cpu_possible(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
return cpu;
}
#endif
static int __init
-acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_core_pic *processor = NULL;
@@ -100,13 +110,30 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
acpi_core_pic[processor->core_id] = *processor;
- set_processor_mask(processor->core_id, processor->flags);
+ if (processor->flags & ACPI_MADT_ENABLED)
+ set_processor_mask(processor->core_id, 1);
#endif
return 0;
}
static int __init
+acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+#ifdef CONFIG_SMP
+ if (!(processor->flags & ACPI_MADT_ENABLED))
+ set_processor_mask(processor->core_id, 2);
+#endif
+
+ return 0;
+}
+static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
static int core = 0;
@@ -133,7 +160,10 @@ static void __init acpi_process_madt(void)
}
#endif
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
- acpi_parse_processor, MAX_CORE_PIC);
+ acpi_parse_p1_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_p2_processor, MAX_CORE_PIC);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
@@ -202,6 +232,9 @@ void __init acpi_boot_table_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+ if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+
return;
fdt_earlycon:
@@ -216,18 +249,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}
-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-unsigned int numa_distance_cnt;
-
-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
-{
- return slit->locality_count;
-}
-
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
@@ -296,6 +317,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
int nid;
nid = acpi_get_node(handle);
+
+ if (nid != NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
+
if (nid != NUMA_NO_NODE) {
set_cpuid_to_node(physid, nid);
node_set(nid, numa_nodes_parsed);
@@ -310,12 +335,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
{
int cpu;
- cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
- if (cpu < 0) {
+ cpu = cpu_number_map(physid);
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
- return cpu;
+ return -ERANGE;
}
+ num_processors++;
+ set_cpu_present(cpu, true);
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index bee9f7a3108f..db1e4bb26b6a 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/ftrace.h>
+#include <vdso/datapage.h>
static void __used output_ptreg_defines(void)
{
@@ -279,18 +280,6 @@ static void __used output_pbe_defines(void)
}
#endif
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __used output_fgraph_ret_regs_defines(void)
-{
- COMMENT("LoongArch fgraph_ret_regs offsets.");
- OFFSET(FGRET_REGS_A0, fgraph_ret_regs, regs[0]);
- OFFSET(FGRET_REGS_A1, fgraph_ret_regs, regs[1]);
- OFFSET(FGRET_REGS_FP, fgraph_ret_regs, fp);
- DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs));
- BLANK();
-}
-#endif
-
static void __used output_kvm_defines(void)
{
COMMENT("KVM/LoongArch Specific offsets.");
@@ -307,6 +296,7 @@ static void __used output_kvm_defines(void)
OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp);
OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp);
OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd);
+ OFFSET(KVM_ARCH_KVMPGD, kvm_vcpu_arch, kvm_pgd);
OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit);
OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry);
OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry);
@@ -321,3 +311,11 @@ static void __used output_kvm_defines(void)
OFFSET(KVM_GPGD, kvm, arch.pgd);
BLANK();
}
+
+static void __used output_vdso_defines(void)
+{
+ COMMENT("LoongArch vDSO offsets.");
+
+ DEFINE(__VDSO_PAGES, VDSO_NR_PAGES);
+ BLANK();
+}
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
index c7988f757281..8e231b0d2cd6 100644
--- a/arch/loongarch/kernel/cacheinfo.c
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -51,6 +51,12 @@ static void cache_cpumap_setup(unsigned int cpu)
continue;
sib_leaf = sib_cpu_ci->info_list + index;
+ /* SMT cores share all caches */
+ if (cpus_are_siblings(i, cpu)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ /* Cores within a node share the node-level caches */
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 55320813ee08..fedaa67cde41 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -91,12 +91,30 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
unsigned int config;
unsigned long asid_mask;
- c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
- LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
+
+ switch (config & CPUCFG1_ISA) {
+ case 0:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32R);
+ break;
+ case 1:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ break;
+ case 2:
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ break;
+ default:
+ pr_warn("Warning: unknown ISA level\n");
+ }
+
+ if (config & CPUCFG1_PAGING)
+ c->options |= LOONGARCH_CPU_TLB;
+ if (config & CPUCFG1_IOCSR)
+ c->options |= LOONGARCH_CPU_IOCSR;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -106,7 +124,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
}
-
config = read_cpucfg(LOONGARCH_CPUCFG2);
if (config & CPUCFG2_LAM) {
c->options |= LOONGARCH_CPU_LAM;
@@ -140,6 +157,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
+ if (config & CPUCFG2_LSPW) {
+ c->options |= LOONGARCH_CPU_LSPW;
+ elf_hwcap |= HWCAP_LOONGARCH_LSPW;
+ }
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
@@ -163,26 +184,13 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
- config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
- if (config & IOCSRF_CSRIPI)
- c->options |= LOONGARCH_CPU_CSRIPI;
- if (config & IOCSRF_EXTIOI)
- c->options |= LOONGARCH_CPU_EXTIOI;
- if (config & IOCSRF_FREQSCALE)
- c->options |= LOONGARCH_CPU_SCALEFREQ;
- if (config & IOCSRF_FLATMODE)
- c->options |= LOONGARCH_CPU_FLATMODE;
- if (config & IOCSRF_EIODECODE)
- c->options |= LOONGARCH_CPU_EIODECODE;
- if (config & IOCSRF_VM)
- c->options |= LOONGARCH_CPU_HYPERVISOR;
-
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
set_cpu_asid_mask(c, asid_mask);
config = read_csr_prcfg1();
+ c->timerbits = (config & CSR_CONF1_TMRBITS) >> CSR_CONF1_TMRBITS_SHIFT;
c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
@@ -209,6 +217,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
default:
pr_warn("Warning: unknown TLB type\n");
}
+
+ if (get_num_brps() + get_num_wrps())
+ c->options |= LOONGARCH_CPU_WATCH;
}
#define MAX_NAME_LEN 32
@@ -219,52 +230,67 @@ static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
+ uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+ const char *core_name = "Unknown";
- if (!__cpu_full_name[cpu])
- __cpu_full_name[cpu] = cpu_full_name;
-
- *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
- *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
-
- switch (c->processor_id & PRID_SERIES_MASK) {
- case PRID_SERIES_LA132:
+ switch (BIT(fls(c->isa_level) - 1)) {
+ case LOONGARCH_CPU_ISA_LA32R:
+ case LOONGARCH_CPU_ISA_LA32S:
c->cputype = CPU_LOONGSON32;
- set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
- pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
break;
- case PRID_SERIES_LA264:
+ case LOONGARCH_CPU_ISA_LA64:
c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ }
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ core_name = "LA132";
+ break;
+ case PRID_SERIES_LA264:
+ core_name = "LA264";
break;
case PRID_SERIES_LA364:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ core_name = "LA364";
break;
case PRID_SERIES_LA464:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ core_name = "LA464";
break;
case PRID_SERIES_LA664:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ core_name = "LA664";
break;
- default: /* Default to 64 bit */
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
+
+ pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
+
+ if (!cpu_has_iocsr)
+ return;
+
+ if (!__cpu_full_name[cpu])
+ __cpu_full_name[cpu] = cpu_full_name;
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_AVEC)
+ c->options |= LOONGARCH_CPU_AVECINT;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
}
#ifdef CONFIG_64BIT
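
For orientation, a standalone sketch (not part of this patch) of the isa_level decode used in cpu_probe_loongson() above: isa_level is treated as a cumulative bit mask of supported levels, and BIT(fls(x) - 1) keeps only the highest one. The bit values below are illustrative assumptions, not taken from the headers.

	#include <stdio.h>

	#define ISA_LA32R 0x1	/* assumed single-bit masks, lowest to highest */
	#define ISA_LA32S 0x2
	#define ISA_LA64  0x4

	static unsigned int highest_isa_level(unsigned int isa_level)
	{
		/* Equivalent of BIT(fls(isa_level) - 1) for a non-zero mask */
		return 1u << (31 - __builtin_clz(isa_level));
	}

	int main(void)
	{
		/* An LA64 core reports the lower levels too: 0x1 | 0x2 | 0x4 */
		printf("%#x\n", highest_isa_level(ISA_LA32R | ISA_LA32S | ISA_LA64)); /* 0x4 */
		return 0;
	}
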
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
index 7a9c6a9dd2d0..429555fb4e13 100644
--- a/arch/loongarch/kernel/dma.c
+++ b/arch/loongarch/kernel/dma.c
@@ -8,17 +8,12 @@
void acpi_arch_dma_setup(struct device *dev)
{
int ret;
- u64 mask, end = 0;
+ u64 mask, end;
const struct bus_dma_region *map = NULL;
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
- const struct bus_dma_region *r = map;
-
- for (end = 0; r->size; r++) {
- if (r->dma_start + r->size - 1 > end)
- end = r->dma_start + r->size - 1;
- }
+ end = dma_range_map_max(map);
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index 5f23b85d78ca..ba0bdbf86aa8 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -7,7 +7,7 @@
#include <linux/sizes.h>
.macro __EFI_PE_HEADER
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.Lcoff_header:
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
.short .Lsection_count /* NumberOfSections */
@@ -20,7 +20,7 @@
IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
.Loptional_header:
- .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */
.byte 0x02 /* MajorLinkerVersion */
.byte 0x14 /* MinorLinkerVersion */
.long __inittext_end - .Lefi_header_end /* SizeOfCode */
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 000825406c1f..de21e72759ee 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
+bool efi_poweroff_required(void)
+{
+ return efi_enabled(EFI_RUNTIME_SERVICES) &&
+ (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
#if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
@@ -89,7 +95,7 @@ static void __init init_screen_info(void)
memset(si, 0, sizeof(*si));
early_memunmap(si, sizeof(*si));
- memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+ memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size);
}
void __init efi_init(void)
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 48e7e34e355e..2abc29e57381 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -77,24 +77,22 @@ SYM_CODE_START(handle_syscall)
SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ bl ret_from_fork
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
-SYM_CODE_START(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_kernel_thread_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, s1
- jirl ra, s0, 0
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ move a2, s0
+ move a3, s1
+ bl ret_from_kernel_thread
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_kernel_thread)
+SYM_CODE_END(ret_from_kernel_thread_asm)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..27144de5c5fe 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 69a85f2479fb..28caf416ae36 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
/*
* a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
/*
* a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
/*
* a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
/*
* a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
/*
* a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
@@ -530,6 +536,10 @@ SYM_FUNC_END(_restore_lasx_context)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD _restore_fp
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD _restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD _restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 73858c9029cc..d5d81d74034c 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -85,14 +85,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
- * NOTE: __module_text_address() must be called with preemption
- * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+	 * NOTE: __module_text_address() must be called within an RCU read
+ * section, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
- preempt_disable();
- mod = __module_text_address(pc);
- preempt_enable();
+ scoped_guard(rcu)
+ mod = __module_text_address(pc);
}
if (WARN_ON(!mod))
@@ -241,10 +240,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
unsigned long *parent = (unsigned long *)&regs->regs[1];
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
- prepare_ftrace_return(ip, (unsigned long *)parent);
+ old = *parent;
+
+ if (!function_graph_enter_regs(old, ip, 0, parent, fregs))
+ *parent = return_hooker;
}
#else
static int ftrace_modify_graph_caller(bool enable)
@@ -287,6 +294,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 86d5d90ebefe..733a7665e434 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -16,30 +16,35 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>
+ .section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
- /* start of rollback region */
- LONG_L t0, tp, TI_FLAGS
- nop
- andi t0, t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
+ /* start of idle interrupt region */
+ ori t0, zero, CSR_CRMD_IE
+ /* idle instruction needs irq enabled */
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
+ /*
+	 * If an interrupt lands here, between enabling interrupts above and
+	 * going idle on the next instruction, we must *NOT* go idle since the
+	 * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
+ * reprogramming. Fall through -- see handle_vint() below -- and have
+ * the idle loop take care of things.
+ */
idle 0
- /* end of rollback region */
-1: jr ra
+ /* end of idle interrupt region */
+idle_exit:
+ jr ra
SYM_FUNC_END(__arch_cpu_idle)
+ .previous
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
- la_abs t1, __arch_cpu_idle
+ la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
- /* 32 byte rollback region */
- ori t0, t0, 0x1f
- xori t0, t0, 0x1f
+	/* 3-instruction idle interrupt region */
+ ori t0, t0, 0b1100
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c4f7de2e2805..e3865e92a917 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -20,9 +20,9 @@
__HEAD
_head:
- .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */
.org 0x8
- .dword kernel_entry /* Kernel entry point */
+ .dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */
@@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
- li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
@@ -124,11 +120,8 @@ SYM_CODE_END(kernel_entry)
* function after setting up the stack and tp registers.
*/
SYM_CODE_START(smpboot_entry)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
#ifdef CONFIG_PAGE_SIZE_4KB
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index fc55c4de2a11..c35f9bf38033 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type)
READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ READ_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 13, REG, T, VAL);
#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
@@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type)
WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);
static u64 read_wb_reg(int reg, int n, int t)
{
@@ -174,11 +186,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
- u32 ctrl;
+ u32 ctrl, privilege;
int i, max_slots, enable;
+ struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ if (arch_check_bp_in_kernelspace(info))
+ privilege = CTRL_PLV0_ENABLE;
+ else
+ privilege = CTRL_PLV3_ENABLE;
+
+ /* Whether bp belongs to a task. */
+ if (bp->hw.target)
+ regs = task_pt_regs(bp->hw.target);
+
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@@ -197,31 +219,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
- write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
- write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
- write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
- write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
+ regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
- write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
- write_wb_reg(CSR_CFG_MASK, i, 0, 0);
- write_wb_reg(CSR_CFG_MASK, i, 1, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ } else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ }
+ if (bp->hw.target)
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@@ -283,7 +312,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset)
+ int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@@ -303,11 +332,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
- if (!ctrl.len)
- return -EINVAL;
-
- *offset = __ffs(ctrl.len);
-
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@@ -386,21 +410,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
- u64 alignment_mask, offset;
+ u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
- alignment_mask = 0x7;
- else
+ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
- offset = hw->address & alignment_mask;
-
- hw->address &= ~alignment_mask;
- hw->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ }
return 0;
}
@@ -471,12 +491,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
- bp = slots[i];
- if (bp == NULL)
- continue;
- perf_bp_event(bp, regs);
+ if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
+ update_bp_registers(regs, 0, 0);
+ }
}
- update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@@ -488,12 +511,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
- wp = slots[i];
- if (wp == NULL)
- continue;
- perf_bp_event(wp, regs);
+ if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
+ update_bp_registers(regs, 0, 1);
+ }
}
- update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
index 0b5dd2faeb90..54b247d8cdb6 100644
--- a/arch/loongarch/kernel/idle.c
+++ b/arch/loongarch/kernel/idle.c
@@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
- raw_local_irq_enable();
- __arch_cpu_idle(); /* idle instruction needs irq enabled */
+ __arch_cpu_idle();
raw_local_irq_disable();
}
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 3050329556d1..14d7d700bcb9 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -332,7 +332,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
return INSN_BREAK;
}
- emit_jirl(&insn, rj, rd, imm >> 2);
+ emit_jirl(&insn, rd, rj, imm >> 2);
return insn.word;
}
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
deleted file mode 100644
index cb85bda5a6ad..000000000000
--- a/arch/loongarch/kernel/io.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-/*
- * Copy data from IO memory space to "real" memory space.
- */
-void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)from, 8)) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-
- while (count >= 8) {
- *(u64 *)to = __raw_readq(from);
- from += 8;
- to += 8;
- count -= 8;
- }
-
- while (count) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_fromio);
-
-/*
- * Copy data from "real" memory space to IO memory space.
- */
-void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)to, 8)) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-
- while (count >= 8) {
- __raw_writeq(*(u64 *)from, to);
- from += 8;
- to += 8;
- count -= 8;
- }
-
- while (count) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_toio);
-
-/*
- * "memset" on IO memory space.
- */
-void __memset_io(volatile void __iomem *dst, int c, size_t count)
-{
- u64 qc = (u8)c;
-
- qc |= qc << 8;
- qc |= qc << 16;
- qc |= qc << 32;
-
- while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-
- while (count >= 8) {
- __raw_writeq(qc, dst);
- dst += 8;
- count -= 8;
- }
-
- while (count) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memset_io);
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index 883e5066ae44..80946cafaec1 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -87,23 +87,21 @@ static void __init init_vec_parent_group(void)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
-static int __init get_ipi_irq(void)
+int __init arch_probe_nr_irqs(void)
{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+ int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
- if (d)
- return irq_create_mapping(d, INT_IPI);
+ if (!cpu_has_avecint)
+ irq_set_nr_irqs(64 + NR_VECTORS * nr_io_pics);
+ else
+ irq_set_nr_irqs(64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
- return -EINVAL;
+ return NR_IRQS_LEGACY;
}
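
A quick arithmetic sketch of the nr_irqs sizing above (not from this patch; NR_VECTORS = 256 and the CPU/PIC counts are assumptions made only for the example):

	#include <stdio.h>

	int main(void)
	{
		int nr_vectors = 256, nr_io_pics = 2, nr_cpu_ids = 16;	/* assumed values */

		/* Without AVEC: one vector space per I/O interrupt controller */
		printf("%d\n", 64 + nr_vectors * nr_io_pics);			/* 576 */

		/* With AVEC: plus one per-CPU vector space for each possible CPU */
		printf("%d\n", 64 + nr_vectors * (nr_cpu_ids + nr_io_pics));	/* 4672 */
		return 0;
	}
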
void __init init_IRQ(void)
{
int i;
-#ifdef CONFIG_SMP
- int r, ipi_irq;
- static int ipi_dummy_dev;
-#endif
unsigned int order = get_order(IRQ_STACK_SIZE);
struct page *page;
@@ -113,18 +111,9 @@ void __init init_IRQ(void)
init_vec_parent_group();
irqchip_init();
#ifdef CONFIG_SMP
- ipi_irq = get_ipi_irq();
- if (ipi_irq < 0)
- panic("IPI IRQ mapping failed\n");
- irq_set_percpu_devid(ipi_irq);
- r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
- if (r < 0)
- panic("IPI IRQ request failed\n");
+ mp_ops.init_ipi();
#endif
- for (i = 0; i < NR_IRQS; i++)
- irq_set_noprobe(i);
-
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
@@ -133,5 +122,5 @@ void __init init_IRQ(void)
per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
}
- set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+ set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
}
diff --git a/arch/loongarch/kernel/kdebugfs.c b/arch/loongarch/kernel/kdebugfs.c
new file mode 100644
index 000000000000..80cf64772399
--- /dev/null
+++ b/arch/loongarch/kernel/kdebugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/kstrtox.h>
+#include <asm/loongarch.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int sfb_state, tso_state;
+
+static void set_sfb_state(void *info)
+{
+ int val = *(int *)info << CSR_STFILL_SHIFT;
+
+ csr_xchg32(val, CSR_STFILL, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t sfb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[32];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %x\nCurrent State: %x\n", sfb_state, state);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t sfb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1:
+ on_each_cpu(set_sfb_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations sfb_fops = {
+ .read = sfb_read,
+ .write = sfb_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+#define LDSTORDER_NLD_NST 0x0 /* 000 = No Load No Store */
+#define LDSTORDER_ALD_NST 0x1 /* 001 = All Load No Store */
+#define LDSTORDER_SLD_NST 0x3 /* 011 = Same Load No Store */
+#define LDSTORDER_NLD_AST 0x4 /* 100 = No Load All Store */
+#define LDSTORDER_ALD_AST 0x5 /* 101 = All Load All Store */
+#define LDSTORDER_SLD_AST 0x7 /* 111 = Same Load All Store */
+
+static char *tso_hints[] = {
+ "No Load No Store",
+ "All Load No Store",
+ "Invalid Config",
+ "Same Load No Store",
+ "No Load All Store",
+ "All Load All Store",
+ "Invalid Config",
+ "Same Load All Store"
+};
+
+static void set_tso_state(void *info)
+{
+ int val = *(int *)info << CSR_LDSTORDER_SHIFT;
+
+ csr_xchg32(val, CSR_LDSTORDER_MASK, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t tso_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[240];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %d (%s)\n"
+ "Current State: %d (%s)\n\n"
+ "Available States:\n"
+ "0 (%s)\t" "1 (%s)\t" "3 (%s)\n"
+ "4 (%s)\t" "5 (%s)\t" "7 (%s)\n",
+ tso_state, tso_hints[tso_state], state, tso_hints[state],
+ tso_hints[0], tso_hints[1], tso_hints[3], tso_hints[4], tso_hints[5], tso_hints[7]);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t tso_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1: case 3:
+ case 4: case 5: case 7:
+ on_each_cpu(set_tso_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations tso_fops = {
+ .read = tso_read,
+ .write = tso_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+static int __init arch_kdebugfs_init(void)
+{
+ unsigned int config = read_cpucfg(LOONGARCH_CPUCFG3);
+
+ arch_debugfs_dir = debugfs_create_dir("loongarch", NULL);
+
+ if (config & CPUCFG3_SFB) {
+ debugfs_create_file("sfb_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &sfb_state, &sfb_fops);
+ sfb_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+ }
+
+ if (config & (CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP)) {
+ debugfs_create_file("tso_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &tso_state, &tso_fops);
+ tso_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+ }
+
+ return 0;
+}
+postcore_initcall(arch_kdebugfs_init);
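
A minimal userspace sketch for exercising the new knobs; the /sys/kernel/debug mount point is an assumption, while the "loongarch" directory and the file names come from arch_kdebugfs_init() above.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd = open("/sys/kernel/debug/loongarch/sfb_state", O_RDWR);

		if (fd < 0)
			return 1;

		n = read(fd, buf, sizeof(buf) - 1);	/* "Boot State: ...\nCurrent State: ..." */
		if (n > 0)
			fwrite(buf, 1, n, stdout);

		lseek(fd, 0, SEEK_SET);
		if (write(fd, "1", 1) != 1)		/* sfb_write() accepts 0 or 1 */
			perror("write");

		close(fd);
		return 0;
	}
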
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index ec5b28e570c9..4c476904227f 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
+static inline void fpregs_lock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ local_bh_enable();
+}
+
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
- preempt_disable();
+ if (!irqs_disabled())
+ fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu));
@@ -73,7 +90,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false);
- preempt_enable();
+ if (!irqs_disabled())
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
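
Caller-side sketch (kernel context; not part of this patch) of the pairing these helpers expect. The function name and the work in the middle are placeholders, and the declarations are assumed to come from the arch FPU headers.

	static void lsx_compute_sketch(void)
	{
		kernel_fpu_begin();	/* takes fpregs_lock() unless IRQs are already off */

		/* ... LSX/LASX instructions may be used safely here ... */

		kernel_fpu_end();	/* leaves the kernel-FPU region */
	}
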
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index 17b040bd6067..8ba391cfabb0 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -4,8 +4,8 @@
#include <linux/preempt.h>
#include <asm/break.h>
-#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP)
-#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
+#define KPROBE_BP_INSN __emit_break(BRK_KPROBE_BP)
+#define KPROBE_SSTEPBP_INSN __emit_break(BRK_KPROBE_SSTEPBP)
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
index 001f061d226a..71678912d24c 100644
--- a/arch/loongarch/kernel/lbt.S
+++ b/arch/loongarch/kernel/lbt.S
@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
/*
* a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
/*
* a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
/*
* a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 2dcb9e003657..f9381800e291 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -126,14 +126,14 @@ void kexec_reboot(void)
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
- unreachable();
+ BUG();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
- unreachable();
+ BUG();
}
@@ -225,6 +225,7 @@ void crash_smp_send_stop(void)
void machine_shutdown(void)
{
+#ifdef CONFIG_SMP
int cpu;
/* All CPUs go to reboot_code_buffer */
@@ -232,7 +233,6 @@ void machine_shutdown(void)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
-#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S
index 3015896016a0..b6850503e061 100644
--- a/arch/loongarch/kernel/mcount.S
+++ b/arch/loongarch/kernel/mcount.S
@@ -79,10 +79,11 @@ SYM_FUNC_START(ftrace_graph_caller)
SYM_FUNC_END(ftrace_graph_caller)
SYM_FUNC_START(return_to_handler)
- PTR_ADDI sp, sp, -FGRET_REGS_SIZE
- PTR_S a0, sp, FGRET_REGS_A0
- PTR_S a1, sp, FGRET_REGS_A1
- PTR_S zero, sp, FGRET_REGS_FP
+ /* Save return value regs */
+ PTR_ADDI sp, sp, -PT_SIZE
+ PTR_S a0, sp, PT_R4
+ PTR_S a1, sp, PT_R5
+ PTR_S zero, sp, PT_R22
move a0, sp
bl ftrace_return_to_handler
@@ -90,9 +91,11 @@ SYM_FUNC_START(return_to_handler)
/* Restore the real parent address: a0 -> ra */
move ra, a0
- PTR_L a0, sp, FGRET_REGS_A0
- PTR_L a1, sp, FGRET_REGS_A1
- PTR_ADDI sp, sp, FGRET_REGS_SIZE
+ /* Restore return value regs */
+ PTR_L a0, sp, PT_R4
+ PTR_L a1, sp, PT_R5
+ PTR_ADDI sp, sp, PT_SIZE
+
jr ra
SYM_FUNC_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
index 0c65cf09110c..d6b474ad1d5e 100644
--- a/arch/loongarch/kernel/mcount_dyn.S
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -140,19 +140,19 @@ SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
/* Save return value regs */
- PTR_ADDI sp, sp, -FGRET_REGS_SIZE
- PTR_S a0, sp, FGRET_REGS_A0
- PTR_S a1, sp, FGRET_REGS_A1
- PTR_S zero, sp, FGRET_REGS_FP
+ PTR_ADDI sp, sp, -PT_SIZE
+ PTR_S a0, sp, PT_R4
+ PTR_S a1, sp, PT_R5
+ PTR_S zero, sp, PT_R22
move a0, sp
bl ftrace_return_to_handler
move ra, a0
/* Restore return value regs */
- PTR_L a0, sp, FGRET_REGS_A0
- PTR_L a1, sp, FGRET_REGS_A1
- PTR_ADDI sp, sp, FGRET_REGS_SIZE
+ PTR_L a0, sp, PT_R4
+ PTR_L a1, sp, PT_R5
+ PTR_ADDI sp, sp, PT_SIZE
jr ra
SYM_CODE_END(return_to_handler)
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index c7d0338d12c1..36d6d9eeb7c7 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -490,12 +490,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
-void *module_alloc(unsigned long size)
-{
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
-}
-
static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 8fe21f868f72..30a72fd528c0 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -27,10 +27,7 @@
#include <asm/time.h>
int numa_off;
-struct pglist_data *node_data[MAX_NUMNODES];
unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(node_distances);
static struct numa_meminfo numa_meminfo;
@@ -190,24 +187,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
-static void __init alloc_node_data(int nid)
-{
- void *nd;
- unsigned long nd_pa;
- size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
-
- nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
- return;
- }
-
- nd = __va(nd_pa);
-
- node_data[nid] = nd;
- memset(nd, 0, sizeof(pg_data_t));
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
@@ -408,12 +387,6 @@ void __init paging_init(void)
free_area_init(zones_size);
}
-void __init mem_init(void)
-{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- memblock_free_all();
-}
-
int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
new file mode 100644
index 000000000000..e5a39bbad078
--- /dev/null
+++ b/arch/loongarch/kernel/paravirt.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label.h>
+#include <linux/kvm_para.h>
+#include <linux/reboot.h>
+#include <linux/static_call.h>
+#include <asm/paravirt.h>
+
+static int has_steal_clock;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+
+static bool steal_acc = true;
+
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
+
+static u64 paravt_steal_clock(int cpu)
+{
+ int version;
+ u64 steal;
+ struct kvm_steal_time *src;
+
+ src = &per_cpu(steal_time, cpu);
+ do {
+
+ version = src->version;
+ virt_rmb(); /* Make sure that the version is read before the steal */
+ steal = src->steal;
+ virt_rmb(); /* Make sure that the steal is read before the next version */
+
+ } while ((version & 1) || (version != src->version));
+
+ return steal;
+}
+
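
The retry loop above is the reader half of a seqcount-like protocol; the writer lives on the hypervisor side. A self-contained sketch of what that writer conceptually does (illustration only, not code from this patch or from KVM):

	struct steal_time_sketch {
		unsigned int version;
		unsigned long long steal;
	};

	static void steal_time_publish(struct steal_time_sketch *dst, unsigned long long val)
	{
		dst->version++;					/* odd: update in progress */
		__atomic_thread_fence(__ATOMIC_RELEASE);
		dst->steal = val;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		dst->version++;					/* even again: update complete */
	}
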
+#ifdef CONFIG_SMP
+static struct smp_ops native_ops;
+
+static void pv_send_ipi_single(int cpu, unsigned int action)
+{
+ int min, old;
+ irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_single(cpu, action);
+ return;
+ }
+
+ old = atomic_fetch_or(BIT(action), &info->message);
+ if (old)
+ return;
+
+ min = cpu_logical_map(cpu);
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
+}
+
+#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
+
+static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ int i, cpu, min = 0, max = 0, old;
+ __uint128_t bitmap = 0;
+ irq_cpustat_t *info;
+
+ if (cpumask_empty(mask))
+ return;
+
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_mask(mask, action);
+ return;
+ }
+
+ action = BIT(action);
+ for_each_cpu(i, mask) {
+ info = &per_cpu(irq_stat, i);
+ old = atomic_fetch_or(action, &info->message);
+ if (old)
+ continue;
+
+ cpu = cpu_logical_map(i);
+ if (!bitmap) {
+ min = max = cpu;
+ } else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu < min, and the bitmap still has room */
+ bitmap <<= min - cpu;
+ min = cpu;
+ } else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu > min, and the bitmap still has room */
+ max = cpu > max ? cpu : max;
+ } else {
+ /*
+			 * Adding this cpu would make the bitmap exceed
+			 * KVM_IPI_CLUSTER_SIZE, so send the accumulated IPI now
+			 * and start a new bitmap from this cpu.
+ */
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
+ (unsigned long)(bitmap >> BITS_PER_LONG), min);
+ min = max = cpu;
+ bitmap = 0;
+ }
+ __set_bit(cpu - min, (unsigned long *)&bitmap);
+ }
+
+ if (bitmap)
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
+ (unsigned long)(bitmap >> BITS_PER_LONG), min);
+}
+
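
Standalone sketch (not part of this patch) of the packing pv_send_ipi_mask() performs: physical cpu ids become offsets from the smallest id ("min") in a 128-bit bitmap, so one hypercall covers any window of 128 consecutive ids. The cpu ids below are made up.

	#include <stdio.h>

	int main(void)
	{
		int cpus[] = { 3, 67, 70 };		/* hypothetical physical cpu ids */
		int min = 3;				/* smallest id becomes the hypercall base */
		unsigned __int128 bitmap = 0;

		for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
			bitmap |= (unsigned __int128)1 << (cpus[i] - min);

		/* kvm_hypercall3(KVM_HCALL_FUNC_IPI, low, high, min) is then given: */
		printf("low=%#llx high=%#llx min=%d\n",
		       (unsigned long long)bitmap,
		       (unsigned long long)(bitmap >> 64), min);
		return 0;
	}
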
+static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
+{
+ u32 action;
+ irq_cpustat_t *info;
+
+ /* Clear SWI interrupt */
+ clear_csr_estat(1 << INT_SWI0);
+ info = this_cpu_ptr(&irq_stat);
+ action = atomic_xchg(&info->message, 0);
+
+ if (action & SMP_RESCHEDULE) {
+ scheduler_ipi();
+ info->ipi_irqs[IPI_RESCHEDULE]++;
+ }
+
+ if (action & SMP_CALL_FUNCTION) {
+ generic_smp_call_function_interrupt();
+ info->ipi_irqs[IPI_CALL_FUNCTION]++;
+ }
+
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ info->ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ info->ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pv_init_ipi(void)
+{
+ int r, swi;
+
+ /* Init native ipi irq for ACTION_BOOT_CPU */
+ native_ops.init_ipi();
+ swi = get_percpu_irq(INT_SWI0);
+ if (swi < 0)
+ panic("SWI0 IRQ mapping failed\n");
+ irq_set_percpu_devid(swi);
+ r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
+ if (r < 0)
+ panic("SWI0 IRQ request failed\n");
+}
+#endif
+
+bool kvm_para_available(void)
+{
+ int config;
+ static int hypervisor_type;
+
+ if (!cpu_has_hypervisor)
+ return false;
+
+ if (!hypervisor_type) {
+ config = read_cpucfg(CPUCFG_KVM_SIG);
+ if (!memcmp(&config, KVM_SIGNATURE, 4))
+ hypervisor_type = HYPERVISOR_KVM;
+ }
+
+ return hypervisor_type == HYPERVISOR_KVM;
+}
+
+unsigned int kvm_arch_para_features(void)
+{
+ static unsigned int feature;
+
+ if (!kvm_para_available())
+ return 0;
+
+ if (!feature)
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+
+ return feature;
+}
+
+int __init pv_ipi_init(void)
+{
+ if (!kvm_para_has_feature(KVM_FEATURE_IPI))
+ return 0;
+
+#ifdef CONFIG_SMP
+ native_ops = mp_ops;
+ mp_ops.init_ipi = pv_init_ipi;
+ mp_ops.send_ipi_single = pv_send_ipi_single;
+ mp_ops.send_ipi_mask = pv_send_ipi_mask;
+#endif
+
+ return 0;
+}
+
+static int pv_enable_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long addr;
+ struct kvm_steal_time *st;
+
+ if (!has_steal_clock)
+ return -EPERM;
+
+ st = &per_cpu(steal_time, cpu);
+ addr = per_cpu_ptr_to_phys(st);
+
+	/* The whole kvm_steal_time structure must fit within one page */
+ if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
+ pr_warn("Illegal PV steal time addr %lx\n", addr);
+ return -EFAULT;
+ }
+
+ addr |= KVM_STEAL_PHYS_VALID;
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);
+
+ return 0;
+}
+
+static void pv_disable_steal_time(void)
+{
+ if (has_steal_clock)
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
+}
+
+#ifdef CONFIG_SMP
+static int pv_time_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_enable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_disable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+#endif
+
+static void pv_cpu_reboot(void *unused)
+{
+ pv_disable_steal_time();
+}
+
+static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
+{
+ on_each_cpu(pv_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pv_reboot_nb = {
+ .notifier_call = pv_reboot_notify,
+};
+
+int __init pv_time_init(void)
+{
+ int r;
+
+ if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+ return 0;
+
+ has_steal_clock = 1;
+ r = pv_enable_steal_time();
+ if (r < 0) {
+ has_steal_clock = 0;
+ return 0;
+ }
+ register_reboot_notifier(&pv_reboot_nb);
+
+#ifdef CONFIG_SMP
+ r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "loongarch/pv_time:online",
+ pv_time_cpu_online, pv_time_cpu_down_prepare);
+ if (r < 0) {
+ has_steal_clock = 0;
+ pr_err("Failed to install cpu hotplug callbacks\n");
+ return r;
+ }
+#endif
+
+ static_call_update(pv_steal_clock, paravt_steal_clock);
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+#endif
+
+ pr_info("Using paravirt steal-time\n");
+
+ return 0;
+}
+
+int __init pv_spinlock_init(void)
+{
+ if (!cpu_has_hypervisor)
+ return 0;
+
+ static_branch_enable(&virt_spin_lock_key);
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index cac7cba81b65..8ad098703488 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu)
static DEFINE_MUTEX(pmu_reserve_mutex);
static atomic_t active_events = ATOMIC_INIT(0);
-static int get_pmc_irq(void)
-{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
-
- if (d)
- return irq_create_mapping(d, INT_PCOV);
-
- return -EINVAL;
-}
-
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
@@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
on_each_cpu(reset_counters, NULL, 1);
- free_irq(get_pmc_irq(), &loongarch_pmu);
+ free_irq(get_percpu_irq(INT_PCOV), &loongarch_pmu);
mutex_unlock(&pmu_reserve_mutex);
}
}
@@ -489,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
if (!loongarch_pmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- loongarch_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
static irqreturn_t pmu_handle_irq(int irq, void *dev)
@@ -562,7 +551,7 @@ static int loongarch_pmu_event_init(struct perf_event *event)
if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
- irq = get_pmc_irq();
+ irq = get_percpu_irq(INT_PCOV);
flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmu_reserve_mutex);
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 0d33cbc47e51..cea30768ae92 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -13,27 +13,12 @@
#include <asm/processor.h>
#include <asm/time.h>
-/*
- * No lock; only written during early bootup by CPU 0.
- */
-static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
-
-int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
-{
- return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
-}
-
-int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
-{
- return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
-}
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
- struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
@@ -64,9 +49,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
- if (cpu_has_loongarch32)
- seq_printf(m, " loongarch32");
- if (cpu_has_loongarch64)
+ if (isa & LOONGARCH_CPU_ISA_LA32R)
+ seq_printf(m, " loongarch32r");
+ if (isa & LOONGARCH_CPU_ISA_LA32S)
+ seq_printf(m, " loongarch32s");
+ if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
@@ -81,26 +68,20 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
+ if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
- seq_printf(m, "Hardware Watchpoint\t: %s",
- cpu_has_watch ? "yes, " : "no\n");
+ seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
- seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+ seq_printf(m, ", iwatch count: %d, dwatch count: %d",
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
- proc_cpuinfo_notifier_args.m = m;
- proc_cpuinfo_notifier_args.n = n;
-
- raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
- &proc_cpuinfo_notifier_args);
-
- seq_printf(m, "\n");
+ seq_printf(m, "\n\n");
return 0;
}
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index f2ff8b5d591e..3582f591bab2 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -34,6 +35,7 @@
#include <linux/nmi.h>
#include <asm/asm.h>
+#include <asm/asm-prototypes.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
@@ -47,6 +49,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
+#include <asm/switch_to.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
@@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
+asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
@@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs)
+{
+ schedule_tail(prev);
+ syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg)
+{
+ schedule_tail(prev);
+ fn(fn_arg);
+ syscall_exit_to_user_mode(regs);
+}
+
/*
* Copy architecture-specific thread state
*/
@@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = childksp;
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
- p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
- p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->regs[3] = usp;
p->thread.reg03 = (unsigned long) childregs;
- p->thread.reg01 = (unsigned long) ret_from_fork;
- p->thread.sched_ra = (unsigned long) ret_from_fork;
+ p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+ p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
/*
* New tasks lose permission to use the fpu. This accelerates context
@@ -293,13 +314,15 @@ unsigned long stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
- /* Space for the VDSO & data page */
- top -= PAGE_ALIGN(current->thread.vdso->size);
- top -= VVAR_SIZE;
+ if (current->thread.vdso) {
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= VVAR_SIZE;
- /* Space to randomize the VDSO base */
- if (current->flags & PF_RANDOMIZE)
- top -= VDSO_RANDOMIZE_SIZE;
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+ }
return top;
}
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index c114c5ef1332..5e2402cfcab0 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, offset;
+ int err, len, type;
- err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
- switch (note_type) {
- case NT_LOONGARCH_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_LOONGARCH_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
attr->bp_len = len;
attr->bp_type = type;
- attr->bp_addr += offset;
return 0;
}
@@ -603,16 +589,36 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint_ctrl ctrl;
+ struct thread_info *ti = task_thread_info(tsk);
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
attr = bp->attr;
- decode_ctrl_reg(uctrl, &ctrl);
- err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
- if (err)
- return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ decode_ctrl_reg(uctrl, &ctrl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (uctrl & CTRL_PLV_ENABLE) {
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+ attr.disabled = 0;
+ set_ti_thread_flag(ti, TIF_LOAD_WATCH);
+ } else {
+ attr.disabled = 1;
+ clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
+ }
return modify_user_hw_breakpoint(bp, &attr);
}
@@ -643,6 +649,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
+ /* Kernel-space address cannot be monitored by user-space */
+ if ((unsigned long)addr >= XKPRANGE)
+ return -EINVAL;
+
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
@@ -710,7 +720,7 @@ static int hw_break_set(struct task_struct *target,
unsigned int note_type = regset->core_note_type;
/* Resource info */
- offset = offsetof(struct user_watch_state, dbg_regs);
+ offset = offsetof(struct user_watch_state_v2, dbg_regs);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
/* (address, mask, ctrl) registers */
@@ -910,7 +920,7 @@ static const struct user_regset loongarch64_regsets[] = {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
.core_note_type = NT_LOONGARCH_HW_BREAK,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
@@ -918,7 +928,7 @@ static const struct user_regset loongarch64_regsets[] = {
},
[REGSET_HW_WATCH] = {
.core_note_type = NT_LOONGARCH_HW_WATCH,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 1acfa704c8d0..50c469067f3a 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
+#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -34,11 +35,27 @@ static inline void __init relocate_relative(void)
if (rela->r_info != R_LARCH_RELATIVE)
continue;
- if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
- relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
-
+ relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
}
+
+#ifdef CONFIG_RELR
+ u64 *addr = NULL;
+ u64 *relr = (u64 *)&__relr_dyn_begin;
+ u64 *relr_end = (u64 *)&__relr_dyn_end;
+
+ for ( ; relr < relr_end; relr++) {
+ if ((*relr & 1) == 0) {
+ addr = (u64 *)(*relr + reloc_offset);
+ *addr++ += reloc_offset;
+ } else {
+ for (u64 *p = addr, r = *relr >> 1; r; p++, r >>= 1)
+ if (r & 1)
+ *p += reloc_offset;
+ addr += 63;
+ }
+ }
+#endif
}
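
Standalone sketch (not part of this patch) of the RELR walk added above, printing the addresses it would relocate instead of patching them: an even entry names the next word to fix up, while an odd entry, after its low tag bit is dropped, is a bitmap whose bit n selects the n-th word after the cursor, which then advances by 63 words.

	#include <stdint.h>
	#include <stdio.h>

	static void relr_walk(const uint64_t *relr, size_t count)
	{
		uint64_t where = 0;

		for (size_t i = 0; i < count; i++) {
			uint64_t entry = relr[i];

			if ((entry & 1) == 0) {
				where = entry;			/* explicit address entry */
				printf("reloc at 0x%llx\n", (unsigned long long)where);
				where += 8;
			} else {
				uint64_t bits = entry >> 1;	/* bitmap entry */

				for (int n = 0; n < 63; n++)
					if (bits & (1ULL << n))
						printf("reloc at 0x%llx\n",
						       (unsigned long long)(where + 8ULL * n));
				where += 8 * 63;
			}
		}
	}

	int main(void)
	{
		/* Made-up input: one address entry, then one bitmap covering two words */
		const uint64_t relr[] = { 0x120001000, (1ULL << 1) | (1ULL << 3) | 1 };

		relr_walk(relr, 2);
		return 0;
	}
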
static inline void __init relocate_absolute(long random_offset)
@@ -123,6 +140,32 @@ static inline __init bool kaslr_disabled(void)
if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
return true;
+#ifdef CONFIG_HIBERNATION
+ str = strstr(builtin_cmdline, "nohibernate");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "nohibernate");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "noresume");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "noresume");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "resume=");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "resume=");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+#endif
+
return false;
}
@@ -170,7 +213,7 @@ unsigned long __init relocate_kernel(void)
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -182,6 +225,7 @@ unsigned long __init relocate_kernel(void)
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
index 1ef8c6383535..de8fa5a8a825 100644
--- a/arch/loongarch/kernel/reset.c
+++ b/arch/loongarch/kernel/reset.c
@@ -33,7 +33,7 @@ void machine_halt(void)
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
@@ -53,7 +53,7 @@ void machine_power_off(void)
#endif
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
@@ -74,6 +74,6 @@ void machine_restart(char *command)
acpi_reboot();
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 60e0fe97f61a..b99fbb388fe0 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -55,6 +55,7 @@
#define SMBIOS_FREQHIGH_OFFSET 0x17
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
#define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
@@ -125,7 +126,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
- loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}
@@ -258,18 +259,17 @@ static void __init arch_reserve_crashkernel(void)
int ret;
unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
- char *cmdline = boot_command_line;
bool high = false;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base, &low_size, &high);
if (ret)
return;
- reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
static void __init fdt_setup(void)
@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;
/* Prefer to use built-in dtb, checking its legality first. */
- if (!fdt_check_header(__dtb_start))
+ if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
@@ -290,7 +290,7 @@ static void __init fdt_setup(void)
if (!fdt_pointer || fdt_check_header(fdt_pointer))
return;
- early_init_dt_scan(fdt_pointer);
+ early_init_dt_scan(fdt_pointer, __pa(fdt_pointer));
early_init_fdt_reserve_self();
max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
@@ -351,10 +351,8 @@ void __init platform_init(void)
arch_reserve_vmcore();
arch_reserve_crashkernel();
-#ifdef CONFIG_ACPI_TABLE_UPGRADE
- acpi_table_upgrade();
-#endif
#ifdef CONFIG_ACPI
+ acpi_table_upgrade();
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
@@ -388,6 +386,9 @@ static void __init check_kernel_sections_mem(void)
*/
static void __init arch_mem_init(char **cmdline_p)
{
+ /* Recalculate max_low_pfn for "mem=xxx" */
+ max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
@@ -432,7 +433,7 @@ static void __init resource_init(void)
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
- standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES);
for_each_mem_region(region) {
res = &standard_resources[i++];
@@ -578,8 +579,10 @@ static void __init prefill_possible_map(void)
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
- for (; i < NR_CPUS; i++)
+ for (; i < NR_CPUS; i++) {
+ set_cpu_present(i, false);
set_cpu_possible(i, false);
+ }
set_nr_cpu_ids(possible);
}
@@ -603,6 +606,8 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);
resource_init();
+ jump_label_init(); /* Initialise the static keys for paravirtualization */
+
#ifdef CONFIG_SMP
plat_smp_setup();
prefill_possible_map();
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 7a555b600171..4740cb5b2388 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -51,27 +51,6 @@
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index aabee0b280fe..4b24589c0b56 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -13,11 +13,13 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq_work.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
@@ -29,6 +31,7 @@
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
+#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -66,14 +69,11 @@ static cpumask_t cpu_core_setup_map;
struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);
-enum ipi_msg_type {
- IPI_RESCHEDULE,
- IPI_CALL_FUNCTION,
-};
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -83,7 +83,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
}
}
@@ -190,24 +190,19 @@ static u32 ipi_read_clear(int cpu)
static void ipi_write_action(int cpu, u32 action)
{
- unsigned int irq = 0;
+ uint32_t val;
- while ((irq = ffs(action))) {
- uint32_t val = IOCSR_IPI_SEND_BLOCKING;
-
- val |= (irq - 1);
- val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
- iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
- action &= ~BIT(irq - 1);
- }
+ val = IOCSR_IPI_SEND_BLOCKING | action;
+ val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+ iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
}
-void loongson_send_ipi_single(int cpu, unsigned int action)
+static void loongson_send_ipi_single(int cpu, unsigned int action)
{
ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
-void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
@@ -222,11 +217,18 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
void arch_smp_send_reschedule(int cpu)
{
- loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+ mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
-irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
+}
+#endif
+
+static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
unsigned int cpu = smp_processor_id();
@@ -243,9 +245,39 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
}
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
+static void loongson_init_ipi(void)
+{
+ int r, ipi_irq;
+
+ ipi_irq = get_percpu_irq(INT_IPI);
+ if (ipi_irq < 0)
+ panic("IPI IRQ mapping failed\n");
+
+ irq_set_percpu_devid(ipi_irq);
+ r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat);
+ if (r < 0)
+ panic("IPI IRQ request failed\n");
+}
+
+struct smp_ops mp_ops = {
+ .init_ipi = loongson_init_ipi,
+ .send_ipi_single = loongson_send_ipi_single,
+ .send_ipi_mask = loongson_send_ipi_mask,
+};
+
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
@@ -260,18 +292,19 @@ static void __init fdt_smp_setup(void)
if (cpuid >= nr_cpu_ids)
continue;
- if (cpuid == loongson_sysconf.boot_cpu_id) {
+ if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- numa_add_cpu(cpu);
- } else {
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- }
+ else
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
+
+ early_numa_add_cpu(cpuid, 0);
+ set_cpuid_to_node(cpuid, 0);
}
loongson_sysconf.nr_cpus = num_processors;
@@ -289,6 +322,7 @@ void __init loongson_smp_setup(void)
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+ pv_ipi_init();
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}
@@ -298,11 +332,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
int i = 0;
parse_acpi_topology();
+ cpu_data[0].global_id = cpu_logical_map(0);
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
set_cpu_present(i, true);
csr_mail_send(0, __cpu_logical_map[i], 0);
- cpu_data[i].global_id = __cpu_logical_map[i];
}
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -323,7 +357,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle)
csr_mail_send(entry, cpu_logical_map(cpu), 0);
- loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
+ loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
}
/*
@@ -333,7 +367,7 @@ void loongson_init_secondary(void)
{
unsigned int cpu = smp_processor_id();
unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
- ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+ ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER | ECFGF_SIP0;
change_csr_ecfg(ECFG0_IM, imask);
@@ -347,6 +381,7 @@ void loongson_init_secondary(void)
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].global_id = cpu_logical_map(cpu);
}
void loongson_smp_finish(void)
@@ -389,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
mb();
}
-void __noreturn arch_cpu_idle_dead(void)
+static void __noreturn idle_play_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
@@ -413,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
BUG();
}
+#ifdef CONFIG_HIBERNATION
+static void __noreturn poll_play_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("nop\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ BUG();
+}
+#endif
+
+static void (*play_dead)(void) = idle_play_dead;
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+ play_dead();
+ BUG(); /* play_dead() doesn't return */
+}
+
+#ifdef CONFIG_HIBERNATION
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ int ret;
+
+ play_dead = poll_play_dead;
+ ret = suspend_disable_secondary_cpus();
+ play_dead = idle_play_dead;
+
+ return ret;
+}
+#endif
+
#endif
/*
@@ -449,13 +528,14 @@ core_initcall(ipi_pm_init);
#endif
/* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
+ numa_add_cpu(0);
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {
@@ -481,6 +561,8 @@ void smp_prepare_boot_cpu(void)
rr_node = next_node_in(rr_node, node_online_map);
}
}
+
+ pv_spinlock_init();
}
/* called from main before smp_init() */
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 31dd8199b245..9c23cb7e432f 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -12,7 +12,7 @@
/*
* task_struct *__switch_to(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, void *sched_ra, void *sched_cfa)
*/
.align 5
SYM_FUNC_START(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index b4c5acd7aa3b..168bd97540f8 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,20 +9,24 @@
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/asm.h>
#include <asm/exception.h>
+#include <asm/loongarch.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
- prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+ prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
@@ -32,13 +36,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long, unsigned long);
-void noinstr do_syscall(struct pt_regs *regs)
+void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
{
unsigned long nr;
sys_call_fn syscall_fn;
@@ -54,11 +58,24 @@ void noinstr do_syscall(struct pt_regs *regs)
nr = syscall_enter_from_user_mode(regs, nr);
+ add_random_kstack_offset();
+
if (nr < NR_syscalls) {
syscall_fn = sys_call_table[nr];
regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
regs->regs[7], regs->regs[8], regs->regs[9]);
}
+ /*
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints: 16-bytes (i.e. 4-bits)
+ * aligned, which will remove the 4 low bits from any entropy chosen
+ * here.
+ *
+ * The resulting 6 bits of entropy is seen in SP[9:4].
+ */
+ choose_random_kstack_offset(drdtime());
+
syscall_exit_to_user_mode(regs);
}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index e7015f7b70e3..bc75a3a69fc8 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -15,6 +15,7 @@
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
+#include <asm/paravirt.h>
#include <asm/time.h>
u64 cpu_clock_freq;
@@ -110,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
-static long init_offset __nosavedata;
+static long init_offset;
void save_counter(void)
{
@@ -123,26 +124,20 @@ void sync_counter(void)
csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}
-static int get_timer_irq(void)
-{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
-
- if (d)
- return irq_create_mapping(d, INT_TI);
-
- return -EINVAL;
-}
-
int constant_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
- unsigned long min_delta = 0x600;
- unsigned long max_delta = (1UL << 48) - 1;
+#ifdef CONFIG_PREEMPT_RT
+ unsigned long min_delta = 100;
+#else
+ unsigned long min_delta = 1000;
+#endif
+ unsigned long max_delta = GENMASK_ULL(boot_cpu_data.timerbits, 0);
struct clock_event_device *cd;
static int irq = 0, timer_irq_installed = 0;
if (!timer_irq_installed) {
- irq = get_timer_irq();
+ irq = get_percpu_irq(INT_TI);
if (irq < 0)
pr_err("Failed to map irq %d (timer)\n", irq);
}
@@ -224,4 +219,5 @@ void __init time_init(void)
constant_clockevent_init();
constant_clocksource_init();
+ pv_time_init();
}
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index f9f4eb00c92e..47fc2de6d150 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -553,8 +553,12 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
+ bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
+ if (!pie)
+ local_irq_enable();
+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
@@ -579,6 +583,8 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
+ if (!pie)
+ local_irq_disable();
#endif
irqentry_exit(regs, state);
}
@@ -592,29 +598,37 @@ int is_valid_bugaddr(unsigned long addr)
static void bug_handler(struct pt_regs *regs)
{
+ if (user_mode(regs)) {
+ force_sig(SIGTRAP);
+ return;
+ }
+
switch (report_bug(regs->csr_era, regs)) {
case BUG_TRAP_TYPE_BUG:
- case BUG_TRAP_TYPE_NONE:
- die_if_kernel("Oops - BUG", regs);
- force_sig(SIGTRAP);
+ die("Oops - BUG", regs);
break;
case BUG_TRAP_TYPE_WARN:
/* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE;
break;
+
+ default:
+ if (!fixup_exception(regs))
+ die("Oops - BUG", regs);
}
}
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -680,7 +694,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -698,11 +712,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -768,7 +783,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1003,6 +1018,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
+ bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -1012,7 +1028,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1026,7 +1042,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
index 3abf163dda05..487be604b96a 100644
--- a/arch/loongarch/kernel/unaligned.c
+++ b/arch/loongarch/kernel/unaligned.c
@@ -482,14 +482,10 @@ sigbus:
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
- struct dentry *d;
-
- d = debugfs_create_dir("loongarch", NULL);
-
debugfs_create_u32("unaligned_instructions_user",
- S_IRUGO, d, &unaligned_instructions_user);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_user);
debugfs_create_u32("unaligned_instructions_kernel",
- S_IRUGO, d, &unaligned_instructions_kernel);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_kernel);
return 0;
}
diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
index b25722876331..d623935a7547 100644
--- a/arch/loongarch/kernel/unwind_orc.c
+++ b/arch/loongarch/kernel/unwind_orc.c
@@ -399,7 +399,7 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
/* Don't let modules unload while we're reading their ORC data. */
- preempt_disable();
+ guard(rcu)();
if (is_entry_func(state->pc))
goto end;
@@ -514,14 +514,12 @@ bool unwind_next_frame(struct unwind_state *state)
if (!__kernel_text_address(state->pc))
goto err;
- preempt_enable();
return true;
err:
state->error = true;
end:
- preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
index 87abc7137b73..6022eb0f71db 100644
--- a/arch/loongarch/kernel/uprobes.c
+++ b/arch/loongarch/kernel/uprobes.c
@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
- user_enable_single_step(current);
return 0;
}
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
-
- if (auprobe->simulate)
- instruction_pointer_set(regs, auprobe->resume_era);
- else
- instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
-
- user_disable_single_step(current);
+ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
return 0;
}
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
- user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
- auprobe->resume_era = regs->csr_era;
return true;
}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index 90dfccb41c14..10cf1608c7b3 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -14,8 +14,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/time_namespace.h>
-#include <linux/timekeeper_internal.h>
+#include <linux/vdso_datastore.h>
#include <asm/page.h>
#include <asm/vdso.h>
@@ -26,18 +25,6 @@
extern char vdso_start[], vdso_end[];
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store generic_vdso_data __page_aligned_data;
-
-static union {
- u8 page[LOONGARCH_VDSO_DATA_SIZE];
- struct loongarch_vdso_data vdata;
-} loongarch_vdso_data __page_aligned_data;
-
-static struct page *vdso_pages[] = { NULL };
-struct vdso_data *vdso_data = generic_vdso_data.data;
-struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
-
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
current->mm->context.vdso = (void *)(new_vma->vm_start);
@@ -45,55 +32,12 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
return 0;
}
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- unsigned long pfn;
- struct page *timens_page = find_timens_vvar_page(vma);
-
- switch (vmf->pgoff) {
- case VVAR_GENERIC_PAGE_OFFSET:
- if (!timens_page)
- pfn = sym_to_pfn(vdso_data);
- else
- pfn = page_to_pfn(timens_page);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace specific
- * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
- * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- else
- pfn = sym_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
- pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
- break;
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
struct loongarch_vdso_info vdso_info = {
.vdso = vdso_start,
- .size = PAGE_SIZE,
.code_mapping = {
.name = "[vdso]",
- .pages = vdso_pages,
.mremap = vdso_mremap,
},
- .data_mapping = {
- .name = "[vvar]",
- .fault = vvar_fault,
- },
.offset_sigreturn = vdso_offset_sigreturn,
};
@@ -102,10 +46,13 @@ static int __init init_vdso(void)
unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
- BUG_ON(!PAGE_ALIGNED(vdso_info.size));
for_each_possible_cpu(cpu)
- vdso_pdata[cpu].node = cpu_to_node(cpu);
+ vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
+
+ vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
+ vdso_info.code_mapping.pages =
+ kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
@@ -115,37 +62,6 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a
- * task changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
static unsigned long vdso_base(void)
{
unsigned long base = STACK_TOP;
@@ -181,9 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto out;
}
- vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
- VM_READ | VM_MAYREAD | VM_PFNMAP,
- &info->data_mapping);
+ vma = vdso_install_vvar_mapping(mm, data_addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index e8e97dbf9ca4..08ea921cdec1 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
+#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -112,6 +113,14 @@ SECTIONS
__rela_dyn_end = .;
}
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ __relr_dyn_begin = .;
+ *(.relr.dyn)
+ __relr_dyn_end = .;
+ }
+#endif
+
.data.rel : { *(.data.rel*) }
#ifdef CONFIG_RELOCATABLE
@@ -142,10 +151,11 @@ SECTIONS
#ifdef CONFIG_EFI_STUB
/* header symbols */
- _kernel_asize = _end - _text;
- _kernel_fsize = _edata - _text;
- _kernel_vsize = _end - __initdata_begin;
- _kernel_rsize = _edata - __initdata_begin;
+ _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
+ _kernel_asize = ABSOLUTE(_end - _text);
+ _kernel_fsize = ABSOLUTE(_edata - _text);
+ _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
+ _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif
.gptab.sdata : {
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index c4ef2b4d9797..40eea6da7c25 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -21,14 +21,19 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on AS_HAS_LVZ_EXTENSION
select HAVE_KVM_DIRTY_RING_ACQ_REL
+ select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_MSI
+ select HAVE_KVM_READONLY_MEM
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMIO
- select HAVE_KVM_READONLY_MEM
select KVM_XFER_TO_GUEST_WORK
+ select SCHED_INFO
+ select GUEST_PERF_EVENTS if PERF_EVENTS
help
Support hosting virtualized guest machines using
hardware virtualization extensions. You will need
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index 244467d7792a..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -3,8 +3,6 @@
# Makefile for LoongArch KVM support
#
-ccflags-y += -I $(srctree)/$(src)
-
include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
@@ -18,5 +16,9 @@ kvm-y += timer.o
kvm-y += tlb.o
kvm-y += vcpu.o
kvm-y += vm.o
+kvm-y += intc/ipi.o
+kvm-y += intc/eiointc.o
+kvm-y += intc/pch_pic.o
+kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ed1d89d53e2e..fa52251b3bf1 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
+#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
@@ -20,6 +21,47 @@
#include <asm/kvm_vcpu.h>
#include "trace.h"
+static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ int rd, rj;
+ unsigned int index, ret;
+
+ if (inst.reg2_format.opcode != cpucfg_op)
+ return EMULATE_FAIL;
+
+ rd = inst.reg2_format.rd;
+ rj = inst.reg2_format.rj;
+ ++vcpu->stat.cpucfg_exits;
+ index = vcpu->arch.gprs[rj];
+
+ /*
+ * By LoongArch Reference Manual 2.2.10.5
+ * Return value is 0 for undefined CPUCFG index
+ *
+ * Disable preemption since hw gcsr is accessed
+ */
+ preempt_disable();
+ switch (index) {
+ case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
+ vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
+ break;
+ case CPUCFG_KVM_SIG:
+ /* CPUCFG emulation between 0x40000000 -- 0x400000ff */
+ vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
+ break;
+ case CPUCFG_KVM_FEATURE:
+ ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+ vcpu->arch.gprs[rd] = ret;
+ break;
+ default:
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ preempt_enable();
+
+ return EMULATE_DONE;
+}
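A hedged guest-side sketch of how this emulated window is typically probed (the 0x40000000 index and the "KVM\0" bytes follow the CPUCFG_KVM_SIG / KVM_SIGNATURE handling above; read_cpucfg() is assumed to be the usual LoongArch helper):

/* Hedged sketch: detect the hypervisor from the emulated CPUCFG window. */
unsigned int sig = read_cpucfg(0x40000000);	/* CPUCFG_KVM_SIG */

if (!memcmp(&sig, "KVM\0", 4))
	pr_info("Running as a KVM guest\n");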
+
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
unsigned long val = 0;
@@ -83,6 +125,14 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
+ if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
+ if (kvm_guest_has_pmu(&vcpu->arch)) {
+ vcpu->arch.pc -= 4;
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ return EMULATE_DONE;
+ }
+ }
+
/* Process CSR ops */
switch (rj) {
case 0: /* process csrrd */
@@ -106,8 +156,8 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- int ret;
- unsigned long val;
+ int idx, ret;
+ unsigned long *val;
u32 addr, rd, rj, opcode;
/*
@@ -117,9 +167,9 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
rj = inst.reg2_format.rj;
opcode = inst.reg2_format.opcode;
addr = vcpu->arch.gprs[rj];
- ret = EMULATE_DO_IOCSR;
run->iocsr_io.phys_addr = addr;
run->iocsr_io.is_write = 0;
+ val = &vcpu->arch.gprs[rd];
/* LoongArch is Little endian */
switch (opcode) {
@@ -152,16 +202,33 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
run->iocsr_io.is_write = 1;
break;
default:
- ret = EMULATE_FAIL;
- break;
+ return EMULATE_FAIL;
}
- if (ret == EMULATE_DO_IOCSR) {
- if (run->iocsr_io.is_write) {
- val = vcpu->arch.gprs[rd];
- memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
+ if (run->iocsr_io.is_write) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0)
+ ret = EMULATE_DONE;
+ else {
+ ret = EMULATE_DO_IOCSR;
+ /* Save data and let user space write it */
+ memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
}
- vcpu->arch.io_gpr = rd;
+ trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
+ } else {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0)
+ ret = EMULATE_DONE;
+ else {
+ ret = EMULATE_DO_IOCSR;
+ /* Save register id for iocsr read completion */
+ vcpu->arch.io_gpr = rd;
+ }
+ trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
}
return ret;
@@ -208,8 +275,6 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
- int rd, rj;
- unsigned int index;
unsigned long curr_pc;
larch_inst inst;
enum emulation_result er = EMULATE_DONE;
@@ -224,21 +289,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
- if (inst.reg2_format.opcode == 0x1B) {
- rd = inst.reg2_format.rd;
- rj = inst.reg2_format.rj;
- ++vcpu->stat.cpucfg_exits;
- index = vcpu->arch.gprs[rj];
- er = EMULATE_DONE;
- /*
- * By LoongArch Reference Manual 2.2.10.5
- * return value is 0 for undefined cpucfg index
- */
- if (index < KVM_MAX_CPUCFG_REGS)
- vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
- else
- vcpu->arch.gprs[rd] = 0;
- }
+ er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
er = kvm_handle_csr(vcpu, inst);
@@ -290,7 +341,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
* 2) Execute CACOP/IDLE instructions;
* 3) Access to unimplemented CSRs/IOCSRs.
*/
-static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
+static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
int ret = RESUME_GUEST;
enum emulation_result er = EMULATE_DONE;
@@ -315,7 +366,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int op8, opcode, rd;
struct kvm_run *run = vcpu->run;
@@ -413,17 +464,35 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);
+
+ /*
+ * If an mmio device such as the PCH-PIC is emulated in KVM,
+ * there is no need to return to user space to handle the
+ * mmio exception.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
+ run->mmio.len, &vcpu->arch.gprs[rd]);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (!ret) {
+ update_pc(&vcpu->arch);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
/* Set for kvm_complete_mmio_read() use */
vcpu->arch.io_gpr = rd;
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
- } else {
- kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
- inst.word, vcpu->arch.pc, vcpu->arch.badv);
- kvm_arch_vcpu_dump_regs(vcpu);
- vcpu->mmio_needed = 0;
+ return EMULATE_DO_MMIO;
}
+ kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->mmio_needed = 0;
+
return ret;
}
@@ -463,12 +532,15 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
}
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
+ run->mmio.phys_addr, run->mmio.data);
+
return er;
}
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int rd, op8, opcode;
unsigned long curr_pc, rd_val = 0;
struct kvm_run *run = vcpu->run;
@@ -561,21 +633,35 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);
+
+ /*
+ * If an mmio device such as the PCH-PIC is emulated in KVM,
+ * there is no need to return to user space to handle the
+ * mmio exception.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (!ret)
+ return EMULATE_DONE;
+
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- } else {
- vcpu->arch.pc = curr_pc;
- kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
- inst.word, vcpu->arch.pc, vcpu->arch.badv);
- kvm_arch_vcpu_dump_regs(vcpu);
- /* Rollback PC if emulation was unsuccessful */
+ return EMULATE_DO_MMIO;
}
+ vcpu->arch.pc = curr_pc;
+ kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ /* Rollback PC if emulation was unsuccessful */
+
return ret;
}
-static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
int ret;
larch_inst inst;
@@ -583,7 +669,13 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
struct kvm_run *run = vcpu->run;
unsigned long badv = vcpu->arch.badv;
- ret = kvm_handle_mm_fault(vcpu, badv, write);
+ /* Inject an ADE exception if the address exceeds the max GPA size */
+ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+ return RESUME_GUEST;
+ }
+
+ ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
if (ret) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
@@ -613,24 +705,33 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return ret;
}
-static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
+{
+ return kvm_handle_rdwr_fault(vcpu, false, ecode);
+}
+
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, false);
+ return kvm_handle_rdwr_fault(vcpu, true, ecode);
}
-static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- return kvm_handle_rdwr_fault(vcpu, true);
+ update_pc(&vcpu->arch);
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);
+
+ return 0;
}
/**
* kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
-static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
struct kvm_run *run = vcpu->run;
@@ -655,14 +756,40 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static long kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+ unsigned long id, data;
+
+ id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ switch (id) {
+ case BIT(KVM_FEATURE_STEAL_TIME):
+ if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return KVM_HCALL_INVALID_PARAMETER;
+
+ vcpu->arch.st.guest_addr = data;
+ if (!(data & KVM_STEAL_PHYS_VALID))
+ return 0;
+
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ return 0;
+ default:
+ return KVM_HCALL_INVALID_CODE;
+ }
+
+ return KVM_HCALL_INVALID_CODE;
+}
+
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
-static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -673,11 +800,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
-static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -685,19 +813,136 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
+{
+ if (kvm_own_lbt(vcpu))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+ return RESUME_GUEST;
+}
+
+static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+{
+ unsigned int min, cpu, i;
+ unsigned long ipi_bitmap;
+ struct kvm_vcpu *dest;
+
+ min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
+ ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
+ if (!ipi_bitmap)
+ continue;
+
+ cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
+ while (cpu < BITS_PER_LONG) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
+ if (!dest)
+ continue;
+
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
+ }
+ }
+
+ return 0;
+}
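To make the A1/A2/A3 layout concrete, a hedged guest-side example of the argument packing that kvm_send_pv_ipi() unpacks (the register roles follow the kvm_read_reg() calls above; the vCPU numbers are made up): A3 carries the lowest target ("min") and A1/A2 carry two 64-bit bitmaps relative to it, so one hypercall covers a window of 128 vCPUs.

/* Hedged example: target vCPUs 3, 5 and 70 in a single hypercall. */
unsigned long a3 = 3;			/* min = 3			*/
unsigned long a1 = 0, a2 = 0;

a1 |= 1UL << (3 - 3);			/* vCPU 3  -> bit 0 of A1	*/
a1 |= 1UL << (5 - 3);			/* vCPU 5  -> bit 2 of A1	*/
a2 |= 1UL << (70 - 3 - 64);		/* vCPU 70 -> bit 3 of A2	*/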
+
+/*
+ * Hypercall emulation always returns to the guest; the caller should check retval.
+ */
+static void kvm_handle_service(struct kvm_vcpu *vcpu)
+{
+ long ret = KVM_HCALL_INVALID_CODE;
+ unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+
+ switch (func) {
+ case KVM_HCALL_FUNC_IPI:
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
+ kvm_send_pv_ipi(vcpu);
+ ret = KVM_HCALL_SUCCESS;
+ }
+ break;
+ case KVM_HCALL_FUNC_NOTIFY:
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
+ ret = kvm_save_notify(vcpu);
+ break;
+ default:
+ break;
+ }
+
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
+}
+
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
+{
+ int ret;
+ larch_inst inst;
+ unsigned int code;
+
+ inst.word = vcpu->arch.badi;
+ code = inst.reg0i15_format.immediate;
+ ret = RESUME_GUEST;
+
+ switch (code) {
+ case KVM_HCALL_SERVICE:
+ vcpu->stat.hypercall_exits++;
+ kvm_handle_service(vcpu);
+ break;
+ case KVM_HCALL_USER_SERVICE:
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ vcpu->stat.hypercall_exits++;
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
+ vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+ vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
+ vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
+ vcpu->run->hypercall.flags = 0;
+ /*
+ * Set invalid return value by default, let user-mode VMM modify it.
+ */
+ vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
+ ret = RESUME_HOST;
+ break;
+ case KVM_HCALL_SWDBG:
+ /* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ ret = RESUME_HOST;
+ break;
+ }
+ fallthrough;
+ default:
+ /* Treat it as a noop instruction, only set the return value */
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ if (ret == RESUME_GUEST)
+ update_pc(&vcpu->arch);
+
+ return ret;
+}
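For reference, a hedged sketch of how a guest issues one of these calls (the 0x100 immediate standing in for KVM_HCALL_SERVICE is an assumption; the argument and return registers follow the kvm_read_reg()/kvm_write_reg() usage above):

/* Hedged guest-side sketch: function id in $a0, argument in $a1,
 * the host writes the return value back into $a0. */
static inline long kvm_hypercall1(unsigned long fid, unsigned long arg0)
{
	register unsigned long fun asm("$a0") = fid;
	register unsigned long a1  asm("$a1") = arg0;

	__asm__ __volatile__("hvcl 0x100"	/* KVM_HCALL_SERVICE (assumed) */
			     : "+r" (fun)
			     : "r" (a1)
			     : "memory");

	return fun;
}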
+
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
-static int kvm_fault_ni(struct kvm_vcpu *vcpu)
+static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int ecode, inst;
- unsigned long estat, badv;
+ unsigned int inst;
+ unsigned long badv;
/* Fetch the instruction */
inst = vcpu->arch.badi;
badv = vcpu->arch.badv;
- estat = vcpu->arch.host_estat;
- ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
@@ -715,10 +960,12 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
+ [EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
+ [EXCCODE_HVC] = kvm_handle_hypercall,
};
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
- return kvm_fault_tables[fault](vcpu);
+ return kvm_fault_tables[fault](vcpu, fault);
}
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
new file mode 100644
index 000000000000..f39929d7bf8a
--- /dev/null
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/count_zeros.h>
+
+static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
+{
+ int ipnum, cpu, irq_index, irq_mask, irq;
+
+ for (irq = 0; irq < EIOINTC_IRQS; irq++) {
+ ipnum = s->ipmap.reg_u8[irq / 32];
+ if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+ ipnum = count_trailing_zeros(ipnum);
+ ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+ }
+ irq_index = irq / 32;
+ irq_mask = BIT(irq & 0x1f);
+
+ cpu = s->coremap.reg_u8[irq];
+ if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
+ set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ else
+ clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ }
+}
+
+static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
+{
+ int ipnum, cpu, found, irq_index, irq_mask;
+ struct kvm_vcpu *vcpu;
+ struct kvm_interrupt vcpu_irq;
+
+ ipnum = s->ipmap.reg_u8[irq / 32];
+ if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+ ipnum = count_trailing_zeros(ipnum);
+ ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+ }
+
+ cpu = s->sw_coremap[irq];
+ vcpu = kvm_get_vcpu(s->kvm, cpu);
+ irq_index = irq / 32;
+ irq_mask = BIT(irq & 0x1f);
+
+ if (level) {
+ /* if not enabled, return without updating */
+ if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
+ return;
+ s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+ set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ } else {
+ s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
+ clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+ }
+
+ if (found < EIOINTC_IRQS)
+ return; /* another irq is being handled, no need to update the parent irq */
+
+ vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
+ kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
+}
+
+static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
+ int irq, void *pvalue, u32 len, bool notify)
+{
+ int i, cpu;
+ u64 val = *(u64 *)pvalue;
+
+ for (i = 0; i < len; i++) {
+ cpu = val & 0xff;
+ val = val >> 8;
+
+ if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
+ cpu = ffs(cpu) - 1;
+ cpu = (cpu >= 4) ? 0 : cpu;
+ }
+
+ if (s->sw_coremap[irq + i] == cpu)
+ continue;
+
+ if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
+ /* lower irq at old cpu and raise irq at new cpu */
+ eiointc_update_irq(s, irq + i, 0);
+ s->sw_coremap[irq + i] = cpu;
+ eiointc_update_irq(s, irq + i, 1);
+ } else {
+ s->sw_coremap[irq + i] = cpu;
+ }
+ }
+}
+
+void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
+{
+ unsigned long flags;
+ unsigned long *isr = (unsigned long *)s->isr.reg_u8;
+
+ level ? set_bit(irq, isr) : clear_bit(irq, isr);
+ spin_lock_irqsave(&s->lock, flags);
+ eiointc_update_irq(s, irq, level);
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s, int index, u8 mask, int level)
+{
+ u8 val;
+ int irq;
+
+ val = mask & s->isr.reg_u8[index];
+ irq = ffs(val);
+ while (irq != 0) {
+ /*
+ * enable bit changed from 0 to 1,
+ * so update the irq from the pending bits
+ */
+ eiointc_update_irq(s, irq - 1 + index * 8, level);
+ val &= ~BIT(irq - 1);
+ irq = ffs(val);
+ }
+}
+
+static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, int len, void *val)
+{
+ int index, ret = 0;
+ u8 data = 0;
+ gpa_t offset;
+
+ offset = addr - EIOINTC_BASE;
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = offset - EIOINTC_NODETYPE_START;
+ data = s->nodetype.reg_u8[index];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ index = offset - EIOINTC_IPMAP_START;
+ data = s->ipmap.reg_u8[index];
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = offset - EIOINTC_ENABLE_START;
+ data = s->enable.reg_u8[index];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ index = offset - EIOINTC_BOUNCE_START;
+ data = s->bounce.reg_u8[index];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = offset - EIOINTC_COREISR_START;
+ data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = offset - EIOINTC_COREMAP_START;
+ data = s->coremap.reg_u8[index];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *(u8 *)val = data;
+
+ return ret;
+}
+
+static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, int len, void *val)
+{
+ int index, ret = 0;
+ u16 data = 0;
+ gpa_t offset;
+
+ offset = addr - EIOINTC_BASE;
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 1;
+ data = s->nodetype.reg_u16[index];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ index = (offset - EIOINTC_IPMAP_START) >> 1;
+ data = s->ipmap.reg_u16[index];
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 1;
+ data = s->enable.reg_u16[index];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ index = (offset - EIOINTC_BOUNCE_START) >> 1;
+ data = s->bounce.reg_u16[index];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 1;
+ data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = (offset - EIOINTC_COREMAP_START) >> 1;
+ data = s->coremap.reg_u16[index];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *(u16 *)val = data;
+
+ return ret;
+}
+
+static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, int len, void *val)
+{
+ int index, ret = 0;
+ u32 data = 0;
+ gpa_t offset;
+
+ offset = addr - EIOINTC_BASE;
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 2;
+ data = s->nodetype.reg_u32[index];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ index = (offset - EIOINTC_IPMAP_START) >> 2;
+ data = s->ipmap.reg_u32[index];
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 2;
+ data = s->enable.reg_u32[index];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ index = (offset - EIOINTC_BOUNCE_START) >> 2;
+ data = s->bounce.reg_u32[index];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 2;
+ data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = (offset - EIOINTC_COREMAP_START) >> 2;
+ data = s->coremap.reg_u32[index];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *(u32 *)val = data;
+
+ return ret;
+}
+
+static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, int len, void *val)
+{
+ int index, ret = 0;
+ u64 data = 0;
+ gpa_t offset;
+
+ offset = addr - EIOINTC_BASE;
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 3;
+ data = s->nodetype.reg_u64[index];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ index = (offset - EIOINTC_IPMAP_START) >> 3;
+ data = s->ipmap.reg_u64;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 3;
+ data = s->enable.reg_u64[index];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ index = (offset - EIOINTC_BOUNCE_START) >> 3;
+ data = s->bounce.reg_u64[index];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 3;
+ data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = (offset - EIOINTC_COREMAP_START) >> 3;
+ data = s->coremap.reg_u64[index];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *(u64 *)val = data;
+
+ return ret;
+}
+
+static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ int ret = -EINVAL;
+ unsigned long flags;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ vcpu->kvm->stat.eiointc_read_exits++;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (len) {
+ case 1:
+ ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
+ break;
+ case 2:
+ ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
+ break;
+ case 4:
+ ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
+ break;
+ case 8:
+ ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
+ break;
+ default:
+ WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
+ __func__, addr, len);
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return ret;
+}
+
+static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s,
+ gpa_t addr, int len, const void *val)
+{
+ int index, irq, bits, ret = 0;
+ u8 cpu;
+ u8 data, old_data;
+ u8 coreisr, old_coreisr;
+ gpa_t offset;
+
+ data = *(u8 *)val;
+ offset = addr - EIOINTC_BASE;
+
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START);
+ s->nodetype.reg_u8[index] = data;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ /*
+ * ipmap cannot be set at runtime; it is only set when the irqchip
+ * driver initializes, so there is no need to update the upper irq level
+ */
+ index = (offset - EIOINTC_IPMAP_START);
+ s->ipmap.reg_u8[index] = data;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START);
+ old_data = s->enable.reg_u8[index];
+ s->enable.reg_u8[index] = data;
+ /*
+ * 1: enable irq.
+ * update irq when isr is set.
+ */
+ data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
+ eiointc_enable_irq(vcpu, s, index, data, 1);
+ /*
+ * 0: disable irq.
+ * update irq when isr is set.
+ */
+ data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
+ eiointc_enable_irq(vcpu, s, index, data, 0);
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ /* do not emulate hw bounced irq routing */
+ index = offset - EIOINTC_BOUNCE_START;
+ s->bounce.reg_u8[index] = data;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START);
+ /* use vcpu_id as the current cpu index */
+ cpu = vcpu->vcpu_id;
+ coreisr = data;
+ old_coreisr = s->coreisr.reg_u8[cpu][index];
+ /* write 1 to clear interrupt */
+ s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
+ coreisr &= old_coreisr;
+ bits = sizeof(data) * 8;
+ irq = find_first_bit((void *)&coreisr, bits);
+ while (irq < bits) {
+ eiointc_update_irq(s, irq + index * bits, 0);
+ bitmap_clear((void *)&coreisr, irq, 1);
+ irq = find_first_bit((void *)&coreisr, bits);
+ }
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ irq = offset - EIOINTC_COREMAP_START;
+ index = irq;
+ s->coremap.reg_u8[index] = data;
+ eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s,
+ gpa_t addr, int len, const void *val)
+{
+ int i, index, irq, bits, ret = 0;
+ u8 cpu;
+ u16 data, old_data;
+ u16 coreisr, old_coreisr;
+ gpa_t offset;
+
+ data = *(u16 *)val;
+ offset = addr - EIOINTC_BASE;
+
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 1;
+ s->nodetype.reg_u16[index] = data;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ /*
+ * ipmap cannot be set at runtime; it is only set when the irqchip
+ * driver initializes, so there is no need to update the upper irq level
+ */
+ index = (offset - EIOINTC_IPMAP_START) >> 1;
+ s->ipmap.reg_u16[index] = data;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 1;
+ old_data = s->enable.reg_u16[index];
+ s->enable.reg_u16[index] = data;
+ /*
+ * 1: enable irq.
+ * update irq when isr is set.
+ */
+ data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
+ index = index << 1;
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 1);
+ }
+ /*
+ * 0: disable irq.
+ * update irq when isr is set.
+ */
+ data = ~s->enable.reg_u16[index >> 1] & old_data & s->isr.reg_u16[index >> 1];
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 0);
+ }
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ /* do not emulate hw bounced irq routing */
+ index = (offset - EIOINTC_BOUNCE_START) >> 1;
+ s->bounce.reg_u16[index] = data;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 1;
+ /* use vcpu_id as the current cpu index */
+ cpu = vcpu->vcpu_id;
+ coreisr = data;
+ old_coreisr = s->coreisr.reg_u16[cpu][index];
+ /* write 1 to clear interrupt */
+ s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
+ coreisr &= old_coreisr;
+ bits = sizeof(data) * 8;
+ irq = find_first_bit((void *)&coreisr, bits);
+ while (irq < bits) {
+ eiointc_update_irq(s, irq + index * bits, 0);
+ bitmap_clear((void *)&coreisr, irq, 1);
+ irq = find_first_bit((void *)&coreisr, bits);
+ }
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ irq = offset - EIOINTC_COREMAP_START;
+ index = irq >> 1;
+ s->coremap.reg_u16[index] = data;
+ eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s,
+ gpa_t addr, int len, const void *val)
+{
+ int i, index, irq, bits, ret = 0;
+ u8 cpu;
+ u32 data, old_data;
+ u32 coreisr, old_coreisr;
+ gpa_t offset;
+
+ data = *(u32 *)val;
+ offset = addr - EIOINTC_BASE;
+
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 2;
+ s->nodetype.reg_u32[index] = data;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ /*
+ * ipmap cannot be set at runtime; it is only set when the irqchip
+ * driver initializes, so there is no need to update the upper irq level
+ */
+ index = (offset - EIOINTC_IPMAP_START) >> 2;
+ s->ipmap.reg_u32[index] = data;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 2;
+ old_data = s->enable.reg_u32[index];
+ s->enable.reg_u32[index] = data;
+ /*
+ * 1: enable irq.
+ * update irq when isr is set.
+ */
+ data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
+ index = index << 2;
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 1);
+ }
+ /*
+ * 0: disable irq.
+ * update irq when isr is set.
+ */
+ data = ~s->enable.reg_u32[index >> 2] & old_data & s->isr.reg_u32[index >> 2];
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 0);
+ }
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ /* do not emulate hw bounced irq routing */
+ index = (offset - EIOINTC_BOUNCE_START) >> 2;
+ s->bounce.reg_u32[index] = data;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 2;
+ /* use vcpu_id as the current cpu index */
+ cpu = vcpu->vcpu_id;
+ coreisr = data;
+ old_coreisr = s->coreisr.reg_u32[cpu][index];
+ /* write 1 to clear interrupt */
+ s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
+ coreisr &= old_coreisr;
+ bits = sizeof(data) * 8;
+ irq = find_first_bit((void *)&coreisr, bits);
+ while (irq < bits) {
+ eiointc_update_irq(s, irq + index * bits, 0);
+ bitmap_clear((void *)&coreisr, irq, 1);
+ irq = find_first_bit((void *)&coreisr, bits);
+ }
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ irq = offset - EIOINTC_COREMAP_START;
+ index = irq >> 2;
+ s->coremap.reg_u32[index] = data;
+ eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s,
+ gpa_t addr, int len, const void *val)
+{
+ int i, index, irq, bits, ret = 0;
+ u8 cpu;
+ u64 data, old_data;
+ u64 coreisr, old_coreisr;
+ gpa_t offset;
+
+ data = *(u64 *)val;
+ offset = addr - EIOINTC_BASE;
+
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 3;
+ s->nodetype.reg_u64[index] = data;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ /*
+ * ipmap cannot be set at runtime; it is only set when the irqchip
+ * driver initializes, so there is no need to update the upper irq level
+ */
+ index = (offset - EIOINTC_IPMAP_START) >> 3;
+ s->ipmap.reg_u64 = data;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 3;
+ old_data = s->enable.reg_u64[index];
+ s->enable.reg_u64[index] = data;
+ /*
+ * 1: enable irq.
+ * update irq when isr is set.
+ */
+ data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
+ index = index << 3;
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 1);
+ }
+ /*
+ * 0: disable irq.
+ * update irq when isr is set.
+ */
+ data = ~s->enable.reg_u64[index >> 3] & old_data & s->isr.reg_u64[index >> 3];
+ for (i = 0; i < sizeof(data); i++) {
+ u8 mask = (data >> (i * 8)) & 0xff;
+ eiointc_enable_irq(vcpu, s, index + i, mask, 0);
+ }
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ /* do not emulate hw bounced irq routing */
+ index = (offset - EIOINTC_BOUNCE_START) >> 3;
+ s->bounce.reg_u64[index] = data;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 3;
+ /* use vcpu_id as the current cpu index */
+ cpu = vcpu->vcpu_id;
+ coreisr = data;
+ old_coreisr = s->coreisr.reg_u64[cpu][index];
+ /* write 1 to clear interrupt */
+ s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
+ coreisr &= old_coreisr;
+ bits = sizeof(data) * 8;
+ irq = find_first_bit((void *)&coreisr, bits);
+ while (irq < bits) {
+ eiointc_update_irq(s, irq + index * bits, 0);
+ bitmap_clear((void *)&coreisr, irq, 1);
+ irq = find_first_bit((void *)&coreisr, bits);
+ }
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ irq = offset - EIOINTC_COREMAP_START;
+ index = irq >> 3;
+ s->coremap.reg_u64[index] = data;
+ eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret = -EINVAL;
+ unsigned long flags;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ vcpu->kvm->stat.eiointc_write_exits++;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (len) {
+ case 1:
+ ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
+ break;
+ case 2:
+ ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
+ break;
+ case 4:
+ ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
+ break;
+ case 8:
+ ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
+ break;
+ default:
+ WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
+ __func__, addr, len);
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_eiointc_ops = {
+ .read = kvm_eiointc_read,
+ .write = kvm_eiointc_write,
+};
+
+static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ u32 *data = val;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ addr -= EIOINTC_VIRT_BASE;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (addr) {
+ case EIOINTC_VIRT_FEATURES:
+ *data = eiointc->features;
+ break;
+ case EIOINTC_VIRT_CONFIG:
+ *data = eiointc->status;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return 0;
+}
+
+static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 value = *(u32 *)val;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ addr -= EIOINTC_VIRT_BASE;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (addr) {
+ case EIOINTC_VIRT_FEATURES:
+ ret = -EPERM;
+ break;
+ case EIOINTC_VIRT_CONFIG:
+ /*
+ * eiointc features can only be set while the controller is disabled
+ */
+ if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
+ ret = -EPERM;
+ break;
+ }
+ eiointc->status = value & eiointc->features;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
+ .read = kvm_eiointc_virt_read,
+ .write = kvm_eiointc_virt_write,
+};
+
+static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned long type = (unsigned long)attr->attr;
+ u32 i, start_irq;
+ void __user *data;
+ struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
+
+ data = (void __user *)attr->addr;
+ spin_lock_irqsave(&s->lock, flags);
+ switch (type) {
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+ if (copy_from_user(&s->num_cpu, data, 4))
+ ret = -EFAULT;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+ if (copy_from_user(&s->features, data, 4))
+ ret = -EFAULT;
+ if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
+ s->status |= BIT(EIOINTC_ENABLE);
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
+ eiointc_set_sw_coreisr(s);
+ for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
+ start_irq = i * 4;
+ eiointc_update_sw_coremap(s, start_irq,
+ (void *)&s->coremap.reg_u32[i], sizeof(u32), false);
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ int addr, cpuid, offset, ret = 0;
+ unsigned long flags;
+ void *p = NULL;
+ void __user *data;
+ struct loongarch_eiointc *s;
+
+ s = dev->kvm->arch.eiointc;
+ addr = attr->attr;
+ cpuid = addr >> 16;
+ addr &= 0xffff;
+ data = (void __user *)attr->addr;
+ switch (addr) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ offset = (addr - EIOINTC_NODETYPE_START) / 4;
+ p = &s->nodetype.reg_u32[offset];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ offset = (addr - EIOINTC_IPMAP_START) / 4;
+ p = &s->ipmap.reg_u32[offset];
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ offset = (addr - EIOINTC_ENABLE_START) / 4;
+ p = &s->enable.reg_u32[offset];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ offset = (addr - EIOINTC_BOUNCE_START) / 4;
+ p = &s->bounce.reg_u32[offset];
+ break;
+ case EIOINTC_ISR_START ... EIOINTC_ISR_END:
+ offset = (addr - EIOINTC_ISR_START) / 4;
+ p = &s->isr.reg_u32[offset];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ offset = (addr - EIOINTC_COREISR_START) / 4;
+ p = &s->coreisr.reg_u32[cpuid][offset];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ offset = (addr - EIOINTC_COREMAP_START) / 4;
+ p = &s->coremap.reg_u32[offset];
+ break;
+ default:
+ kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (is_write) {
+ if (copy_from_user(p, data, 4))
+ ret = -EFAULT;
+ } else {
+ if (copy_to_user(data, p, 4))
+ ret = -EFAULT;
+ }
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ int addr, ret = 0;
+ unsigned long flags;
+ void *p = NULL;
+ void __user *data;
+ struct loongarch_eiointc *s;
+
+ s = dev->kvm->arch.eiointc;
+ addr = attr->attr;
+ addr &= 0xffff;
+
+ data = (void __user *)attr->addr;
+ switch (addr) {
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
+ p = &s->num_cpu;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
+ p = &s->features;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
+ p = &s->status;
+ break;
+ default:
+ kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&s->lock, flags);
+ if (is_write) {
+ if (copy_from_user(p, data, 4))
+ ret = -EFAULT;
+ } else {
+ if (copy_to_user(data, p, 4))
+ ret = -EFAULT;
+ }
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
+ return kvm_eiointc_regs_access(dev, attr, false);
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+ return kvm_eiointc_sw_status_access(dev, attr, false);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_eiointc_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
+ return kvm_eiointc_ctrl_access(dev, attr);
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
+ return kvm_eiointc_regs_access(dev, attr, true);
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+ return kvm_eiointc_sw_status_access(dev, attr, true);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ struct loongarch_eiointc *s;
+ struct kvm_io_device *device, *device1;
+ struct kvm *kvm = dev->kvm;
+
+ /* eiointc has been created */
+ if (kvm->arch.eiointc)
+ return -EINVAL;
+
+ s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+
+ /*
+ * Initialize IOCSR device
+ */
+ device = &s->device;
+ kvm_iodevice_init(device, &kvm_eiointc_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
+ EIOINTC_BASE, EIOINTC_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret < 0) {
+ kfree(s);
+ return ret;
+ }
+
+ device1 = &s->device_vext;
+ kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
+ EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
+ if (ret < 0) {
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
+ kfree(s);
+ return ret;
+ }
+ kvm->arch.eiointc = s;
+
+ return 0;
+}
+
+static void kvm_eiointc_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_eiointc *eiointc;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
+ return;
+
+ kvm = dev->kvm;
+ eiointc = kvm->arch.eiointc;
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
+ kfree(eiointc);
+}
+
+static struct kvm_device_ops kvm_eiointc_dev_ops = {
+ .name = "kvm-loongarch-eiointc",
+ .create = kvm_eiointc_create,
+ .destroy = kvm_eiointc_destroy,
+ .set_attr = kvm_eiointc_set_attr,
+ .get_attr = kvm_eiointc_get_attr,
+};
+
+int kvm_loongarch_register_eiointc_device(void)
+{
+ return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
+}
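The CTRL attribute group handled above is the interface userspace is expected to drive when instantiating the controller. A minimal, hypothetical VMM-side sketch (the helper name, error handling and the already-open vm_fd are illustrative assumptions, not part of this patch) could look like:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Create the in-kernel EIOINTC and push its initial configuration. */
static int eiointc_create_and_init(int vm_fd, uint32_t num_cpu, uint32_t features)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_LOONGARCH_EIOINTC,
	};
	struct kvm_device_attr attr = { 0 };

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* Number of vCPUs the emulated controller serves */
	attr.group = KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL;
	attr.attr  = KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU;
	attr.addr  = (uint64_t)(uintptr_t)&num_cpu;
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	/* Feature bits; without the virt extension EIOINTC_ENABLE is set directly */
	attr.attr = KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE;
	attr.addr = (uint64_t)(uintptr_t)&features;
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;
}

After restoring vCPU state (e.g. on migration), the same device fd would take KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED so that kvm_eiointc_ctrl_access() rebuilds the software coremap, as shown above.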
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
new file mode 100644
index 000000000000..fe734dc062ed
--- /dev/null
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_ipi.h>
+#include <asm/kvm_vcpu.h>
+
+static void ipi_send(struct kvm *kvm, uint64_t data)
+{
+ int cpu, action;
+ uint32_t status;
+ struct kvm_vcpu *vcpu;
+ struct kvm_interrupt irq;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return;
+ }
+
+ action = BIT(data & 0x1f);
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ status = vcpu->arch.ipi_state.status;
+ vcpu->arch.ipi_state.status |= action;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ if (status == 0) {
+ irq.irq = LARCH_INT_IPI;
+ kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+}
+
+static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
+{
+ uint32_t status;
+ struct kvm_interrupt irq;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ vcpu->arch.ipi_state.status &= ~data;
+ status = vcpu->arch.ipi_state.status;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ if (status == 0) {
+ irq.irq = -LARCH_INT_IPI;
+ kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+}
+
+static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
+{
+ uint64_t data = 0;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return data & 0xffff;
+ case 4:
+ return data & 0xffffffff;
+ case 8:
+ return data;
+ default:
+ kvm_err("%s: unknown data len: %d\n", __func__, len);
+ return 0;
+ }
+}
+
+static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
+{
+ void *pbuf;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);
+
+ switch (len) {
+ case 1:
+ *(unsigned char *)pbuf = (unsigned char)data;
+ break;
+ case 2:
+ *(unsigned short *)pbuf = (unsigned short)data;
+ break;
+ case 4:
+ *(unsigned int *)pbuf = (unsigned int)data;
+ break;
+ case 8:
+ *(unsigned long *)pbuf = (unsigned long)data;
+ break;
+ default:
+ kvm_err("%s: unknown data len: %d\n", __func__, len);
+ }
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+}
+
+static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
+{
+ int i, idx, ret;
+ uint32_t val = 0, mask = 0;
+
+ /*
+ * Bits 27-30 are the mask for byte writing.
+ * If the mask is 0, we need not do anything.
+ */
+ if ((data >> 27) & 0xf) {
+ /* Read the old val */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (unlikely(ret)) {
+ kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
+ return ret;
+ }
+ /* Construct the mask by scanning the bit 27-30 */
+ for (i = 0; i < 4; i++) {
+ if (data & (BIT(27 + i)))
+ mask |= (0xff << (i * 8));
+ }
+ /* Save the old part of val */
+ val &= mask;
+ }
+ val |= ((uint32_t)(data >> 32) & ~mask);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (unlikely(ret))
+ kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
+
+ return ret;
+}
+
+static int mail_send(struct kvm *kvm, uint64_t data)
+{
+ int cpu, mailbox, offset;
+ struct kvm_vcpu *vcpu;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+ mailbox = ((data & 0xffffffff) >> 2) & 0x7;
+ offset = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 + mailbox * 4;
+
+ return send_ipi_data(vcpu, offset, data);
+}
+
+static int any_send(struct kvm *kvm, uint64_t data)
+{
+ int cpu, offset;
+ struct kvm_vcpu *vcpu;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+ offset = data & 0xffff;
+
+ return send_ipi_data(vcpu, offset, data);
+}
+
+static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
+{
+ int ret = 0;
+ uint32_t offset;
+ uint64_t res = 0;
+
+ offset = (uint32_t)(addr & 0x1ff);
+ WARN_ON_ONCE(offset & (len - 1));
+
+ switch (offset) {
+ case IOCSR_IPI_STATUS:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ res = vcpu->arch.ipi_state.status;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_EN:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ res = vcpu->arch.ipi_state.en;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_SET:
+ res = 0;
+ break;
+ case IOCSR_IPI_CLEAR:
+ res = 0;
+ break;
+ case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
+ if (offset + len > IOCSR_IPI_BUF_38 + 8) {
+ kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
+ __func__, offset, len);
+ ret = -EINVAL;
+ break;
+ }
+ res = read_mailbox(vcpu, offset, len);
+ break;
+ default:
+ kvm_err("%s: unknown addr: %llx\n", __func__, addr);
+ ret = -EINVAL;
+ break;
+ }
+ *(uint64_t *)val = res;
+
+ return ret;
+}
+
+static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
+{
+ int ret = 0;
+ uint64_t data;
+ uint32_t offset;
+
+ data = *(uint64_t *)val;
+
+ offset = (uint32_t)(addr & 0x1ff);
+ WARN_ON_ONCE(offset & (len - 1));
+
+ switch (offset) {
+ case IOCSR_IPI_STATUS:
+ ret = -EINVAL;
+ break;
+ case IOCSR_IPI_EN:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ vcpu->arch.ipi_state.en = data;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_SET:
+ ret = -EINVAL;
+ break;
+ case IOCSR_IPI_CLEAR:
+ /* Just clear the status of the current vcpu */
+ ipi_clear(vcpu, data);
+ break;
+ case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
+ if (offset + len > IOCSR_IPI_BUF_38 + 8) {
+ kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
+ __func__, offset, len);
+ ret = -EINVAL;
+ break;
+ }
+ write_mailbox(vcpu, offset, data, len);
+ break;
+ case IOCSR_IPI_SEND:
+ ipi_send(vcpu->kvm, data);
+ break;
+ case IOCSR_MAIL_SEND:
+ ret = mail_send(vcpu->kvm, *(uint64_t *)val);
+ break;
+ case IOCSR_ANY_SEND:
+ ret = any_send(vcpu->kvm, *(uint64_t *)val);
+ break;
+ default:
+ kvm_err("%s: unknown addr: %llx\n", __func__, addr);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ int ret;
+ struct loongarch_ipi *ipi;
+
+ ipi = vcpu->kvm->arch.ipi;
+ if (!ipi) {
+ kvm_err("%s: ipi irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+ ipi->kvm->stat.ipi_read_exits++;
+ ret = loongarch_ipi_readl(vcpu, addr, len, val);
+
+ return ret;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret;
+ struct loongarch_ipi *ipi;
+
+ ipi = vcpu->kvm->arch.ipi;
+ if (!ipi) {
+ kvm_err("%s: ipi irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+ ipi->kvm->stat.ipi_write_exits++;
+ ret = loongarch_ipi_writel(vcpu, addr, len, val);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+static int kvm_ipi_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ int len = 4;
+ int cpu, addr;
+ uint64_t val;
+ void *p = NULL;
+ struct kvm_vcpu *vcpu;
+
+ cpu = (attr->attr >> 16) & 0x3ff;
+ addr = attr->attr & 0xff;
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ switch (addr) {
+ case IOCSR_IPI_STATUS:
+ p = &vcpu->arch.ipi_state.status;
+ break;
+ case IOCSR_IPI_EN:
+ p = &vcpu->arch.ipi_state.en;
+ break;
+ case IOCSR_IPI_SET:
+ p = &vcpu->arch.ipi_state.set;
+ break;
+ case IOCSR_IPI_CLEAR:
+ p = &vcpu->arch.ipi_state.clear;
+ break;
+ case IOCSR_IPI_BUF_20:
+ p = &vcpu->arch.ipi_state.buf[0];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_28:
+ p = &vcpu->arch.ipi_state.buf[1];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_30:
+ p = &vcpu->arch.ipi_state.buf[2];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_38:
+ p = &vcpu->arch.ipi_state.buf[3];
+ len = 8;
+ break;
+ default:
+ kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+
+ if (is_write) {
+ if (len == 4) {
+ if (get_user(val, (uint32_t __user *)attr->addr))
+ return -EFAULT;
+ *(uint32_t *)p = (uint32_t)val;
+ } else if (len == 8) {
+ if (get_user(val, (uint64_t __user *)attr->addr))
+ return -EFAULT;
+ *(uint64_t *)p = val;
+ }
+ } else {
+ if (len == 4) {
+ val = *(uint32_t *)p;
+ return put_user(val, (uint32_t __user *)attr->addr);
+ } else if (len == 8) {
+ val = *(uint64_t *)p;
+ return put_user(val, (uint64_t __user *)attr->addr);
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
+ return kvm_ipi_regs_access(dev, attr, false);
+ default:
+ kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
+ return -EINVAL;
+ }
+}
+
+static int kvm_ipi_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
+ return kvm_ipi_regs_access(dev, attr, true);
+ default:
+ kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
+ return -EINVAL;
+ }
+}
+
+static int kvm_ipi_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ struct kvm *kvm;
+ struct kvm_io_device *device;
+ struct loongarch_ipi *s;
+
+ if (!dev) {
+ kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
+ return -EINVAL;
+ }
+
+ kvm = dev->kvm;
+ if (kvm->arch.ipi) {
+ kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
+ return -EINVAL;
+ }
+
+ s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+
+ /*
+ * Initialize IOCSR device
+ */
+ device = &s->device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret < 0) {
+ kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
+ goto err;
+ }
+
+ kvm->arch.ipi = s;
+ return 0;
+
+err:
+ kfree(s);
+ return -EFAULT;
+}
+
+static void kvm_ipi_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_ipi *ipi;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
+ return;
+
+ kvm = dev->kvm;
+ ipi = kvm->arch.ipi;
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
+ kfree(ipi);
+}
+
+static struct kvm_device_ops kvm_ipi_dev_ops = {
+ .name = "kvm-loongarch-ipi",
+ .create = kvm_ipi_create,
+ .destroy = kvm_ipi_destroy,
+ .set_attr = kvm_ipi_set_attr,
+ .get_attr = kvm_ipi_get_attr,
+};
+
+int kvm_loongarch_register_ipi_device(void)
+{
+ return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
+}
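For reference, ipi_send() above decodes the value written to IOCSR_IPI_SEND as a destination cpu id in bits 16-25 and an IPI action bit index in bits 0-4. A tiny illustrative helper (hypothetical name, not from this patch) composing such a word on the sender side:

#include <stdint.h>

/* Build an IOCSR_IPI_SEND word: bits 16..25 = target cpu, bits 0..4 = action */
static inline uint64_t ipi_send_word(uint32_t target_cpu, uint32_t action)
{
	return ((uint64_t)(target_cpu & 0x3ff) << 16) | (action & 0x1f);
}

mail_send() uses the same cpu field plus a mailbox index in bits 2-4, while the upper 32 bits of the written value carry the payload that send_ipi_data() merges under the byte-write mask.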
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
new file mode 100644
index 000000000000..08fce845f668
--- /dev/null
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/count_zeros.h>
+
+/* update the isr according to irq level and route irq to eiointc */
+static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level)
+{
+ u64 mask = BIT(irq);
+
+ /*
+ * set isr and route the irq to the eiointc;
+ * the route table is in htmsi_vector[]
+ */
+ if (level) {
+ if (mask & s->irr & ~s->mask) {
+ s->isr |= mask;
+ irq = s->htmsi_vector[irq];
+ eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
+ }
+ } else {
+ if (mask & s->isr & ~s->irr) {
+ s->isr &= ~mask;
+ irq = s->htmsi_vector[irq];
+ eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
+ }
+ }
+}
+
+/* update batch irqs, the irq_mask is a bitmap of irqs */
+static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level)
+{
+ int irq, bits;
+
+ /* find each irq by irqs bitmap and update each irq */
+ bits = sizeof(irq_mask) * 8;
+ irq = find_first_bit((void *)&irq_mask, bits);
+ while (irq < bits) {
+ pch_pic_update_irq(s, irq, level);
+ bitmap_clear((void *)&irq_mask, irq, 1);
+ irq = find_first_bit((void *)&irq_mask, bits);
+ }
+}
+
+/* called when an irq is triggered in pch pic */
+void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level)
+{
+ u64 mask = BIT(irq);
+
+ spin_lock(&s->lock);
+ if (level)
+ s->irr |= mask; /* set irr */
+ else {
+ /*
+ * In edge-triggered mode, writing 0 does not clear the irq.
+ * The irr register variable is cleared when the cpu writes to the
+ * PCH_PIC_CLEAR_START address area
+ */
+ if (s->edge & mask) {
+ spin_unlock(&s->lock);
+ return;
+ }
+ s->irr &= ~mask;
+ }
+ pch_pic_update_irq(s, irq, level);
+ spin_unlock(&s->lock);
+}
+
+/* msi irq handler */
+void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
+{
+ eiointc_set_irq(kvm->arch.eiointc, irq, level);
+}
+
+/*
+ * pch pic registers are 64-bit, but they are accessed as 32-bit values,
+ * so we use "high" to select whether the low or high 32 bits are
+ * to be read.
+ */
+static u32 pch_pic_read_reg(u64 *s, int high)
+{
+ u64 val = *s;
+
+ /* read the high 32 bits when high is 1 */
+ return high ? (u32)(val >> 32) : (u32)val;
+}
+
+/*
+ * pch pic registers are 64-bit, but they are accessed as 32-bit values,
+ * so we use "high" to select whether the low or high 32 bits are
+ * to be written.
+ */
+static u32 pch_pic_write_reg(u64 *s, int high, u32 v)
+{
+ u64 val = *s, data = v;
+
+ if (high) {
+ /*
+ * Clear val high 32 bits
+ * Write the high 32 bits when the high is 1
+ */
+ *s = (val << 32 >> 32) | (data << 32);
+ val >>= 32;
+ } else
+ /*
+ * Clear val low 32 bits
+ * Write the low 32 bits when the high is 0
+ */
+ *s = (val >> 32 << 32) | v;
+
+ return (u32)val;
+}
+
+static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
+{
+ int offset, index, ret = 0;
+ u32 data = 0;
+ u64 int_id = 0;
+
+ offset = addr - s->pch_pic_base;
+
+ spin_lock(&s->lock);
+ switch (offset) {
+ case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END:
+ /* int id version */
+ int_id |= (u64)PCH_PIC_INT_ID_VER << 32;
+ /* irq number */
+ int_id |= (u64)31 << (32 + 16);
+ /* int id value */
+ int_id |= PCH_PIC_INT_ID_VAL;
+ *(u64 *)val = int_id;
+ break;
+ case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
+ offset -= PCH_PIC_MASK_START;
+ index = offset >> 2;
+ /* read mask reg */
+ data = pch_pic_read_reg(&s->mask, index);
+ *(u32 *)val = data;
+ break;
+ case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
+ offset -= PCH_PIC_HTMSI_EN_START;
+ index = offset >> 2;
+ /* read htmsi enable reg */
+ data = pch_pic_read_reg(&s->htmsi_en, index);
+ *(u32 *)val = data;
+ break;
+ case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
+ offset -= PCH_PIC_EDGE_START;
+ index = offset >> 2;
+ /* read edge enable reg */
+ data = pch_pic_read_reg(&s->edge, index);
+ *(u32 *)val = data;
+ break;
+ case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
+ case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
+ /* we only use default mode: fixed interrupt distribution mode */
+ *(u32 *)val = 0;
+ break;
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ /* only route to int0: eiointc */
+ *(u8 *)val = 1;
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ offset -= PCH_PIC_HTMSI_VEC_START;
+ /* read htmsi vector */
+ data = s->htmsi_vector[offset];
+ *(u8 *)val = data;
+ break;
+ case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
+ /* we only use the default value 0: high level triggered */
+ *(u32 *)val = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock(&s->lock);
+
+ return ret;
+}
+
+static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ int ret;
+ struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
+
+ if (!s) {
+ kvm_err("%s: pch pic irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* statistics of pch pic reading */
+ vcpu->kvm->stat.pch_pic_read_exits++;
+ ret = loongarch_pch_pic_read(s, addr, len, val);
+
+ return ret;
+}
+
+static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
+ int len, const void *val)
+{
+ int ret;
+ u32 old, data, offset, index;
+ u64 irq;
+
+ ret = 0;
+ data = *(u32 *)val;
+ offset = addr - s->pch_pic_base;
+
+ spin_lock(&s->lock);
+ switch (offset) {
+ case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
+ offset -= PCH_PIC_MASK_START;
+ /* get whether high or low 32 bits we want to write */
+ index = offset >> 2;
+ old = pch_pic_write_reg(&s->mask, index, data);
+ /* enable irq when the mask value changes to 0 */
+ irq = (old & ~data) << (32 * index);
+ pch_pic_update_batch_irqs(s, irq, 1);
+ /* disable irq when the mask value changes to 1 */
+ irq = (~old & data) << (32 * index);
+ pch_pic_update_batch_irqs(s, irq, 0);
+ break;
+ case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
+ offset -= PCH_PIC_HTMSI_EN_START;
+ index = offset >> 2;
+ pch_pic_write_reg(&s->htmsi_en, index, data);
+ break;
+ case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
+ offset -= PCH_PIC_EDGE_START;
+ index = offset >> 2;
+ /* 1: edge triggered, 0: level triggered */
+ pch_pic_write_reg(&s->edge, index, data);
+ break;
+ case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END:
+ offset -= PCH_PIC_CLEAR_START;
+ index = offset >> 2;
+ /* write 1 to clear edge irq */
+ old = pch_pic_read_reg(&s->irr, index);
+ /*
+ * get the bitmap of irqs that are edge-triggered,
+ * currently set, and about to be cleared
+ */
+ irq = old & pch_pic_read_reg(&s->edge, index) & data;
+ /* write irr to the new state where irqs have been cleared */
+ pch_pic_write_reg(&s->irr, index, old & ~irq);
+ /* update cleared irqs */
+ pch_pic_update_batch_irqs(s, irq, 0);
+ break;
+ case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
+ offset -= PCH_PIC_AUTO_CTRL0_START;
+ index = offset >> 2;
+ /* we only use default mode: fixed interrupt distribution mode */
+ pch_pic_write_reg(&s->auto_ctrl0, index, 0);
+ break;
+ case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
+ offset -= PCH_PIC_AUTO_CTRL1_START;
+ index = offset >> 2;
+ /* we only use default mode: fixed interrupt distribution mode */
+ pch_pic_write_reg(&s->auto_ctrl1, index, 0);
+ break;
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ offset -= PCH_PIC_ROUTE_ENTRY_START;
+ /* only route to int0: eiointc */
+ s->route_entry[offset] = 1;
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ /* route table to eiointc */
+ offset -= PCH_PIC_HTMSI_VEC_START;
+ s->htmsi_vector[offset] = (u8)data;
+ break;
+ case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
+ offset -= PCH_PIC_POLARITY_START;
+ index = offset >> 2;
+ /* we only use the default value 0: high level triggered */
+ pch_pic_write_reg(&s->polarity, index, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock(&s->lock);
+
+ return ret;
+}
+
+static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret;
+ struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
+
+ if (!s) {
+ kvm_err("%s: pch pic irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* statistics of pch pic writing */
+ vcpu->kvm->stat.pch_pic_write_exits++;
+ ret = loongarch_pch_pic_write(s, addr, len, val);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_pch_pic_ops = {
+ .read = kvm_pch_pic_read,
+ .write = kvm_pch_pic_write,
+};
+
+static int kvm_pch_pic_init(struct kvm_device *dev, u64 addr)
+{
+ int ret;
+ struct kvm *kvm = dev->kvm;
+ struct kvm_io_device *device;
+ struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic;
+
+ s->pch_pic_base = addr;
+ device = &s->device;
+ /* init device by pch pic writing and reading ops */
+ kvm_iodevice_init(device, &kvm_pch_pic_ops);
+ mutex_lock(&kvm->slots_lock);
+ /* register pch pic device */
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+
+ return (ret < 0) ? -EFAULT : 0;
+}
+
+/* used by user space to get or set pch pic registers */
+static int kvm_pch_pic_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ int addr, offset, len = 8, ret = 0;
+ void __user *data;
+ void *p = NULL;
+ struct loongarch_pch_pic *s;
+
+ s = dev->kvm->arch.pch_pic;
+ addr = attr->attr;
+ data = (void __user *)attr->addr;
+
+ /* get pointer to pch pic register by addr */
+ switch (addr) {
+ case PCH_PIC_MASK_START:
+ p = &s->mask;
+ break;
+ case PCH_PIC_HTMSI_EN_START:
+ p = &s->htmsi_en;
+ break;
+ case PCH_PIC_EDGE_START:
+ p = &s->edge;
+ break;
+ case PCH_PIC_AUTO_CTRL0_START:
+ p = &s->auto_ctrl0;
+ break;
+ case PCH_PIC_AUTO_CTRL1_START:
+ p = &s->auto_ctrl1;
+ break;
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ offset = addr - PCH_PIC_ROUTE_ENTRY_START;
+ p = &s->route_entry[offset];
+ len = 1;
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ offset = addr - PCH_PIC_HTMSI_VEC_START;
+ p = &s->htmsi_vector[offset];
+ len = 1;
+ break;
+ case PCH_PIC_INT_IRR_START:
+ p = &s->irr;
+ break;
+ case PCH_PIC_INT_ISR_START:
+ p = &s->isr;
+ break;
+ case PCH_PIC_POLARITY_START:
+ p = &s->polarity;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock(&s->lock);
+ /* write or read value according to is_write */
+ if (is_write) {
+ if (copy_from_user(p, data, len))
+ ret = -EFAULT;
+ } else {
+ if (copy_to_user(data, p, len))
+ ret = -EFAULT;
+ }
+ spin_unlock(&s->lock);
+
+ return ret;
+}
+
+static int kvm_pch_pic_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
+ return kvm_pch_pic_regs_access(dev, attr, false);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_pch_pic_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ u64 addr;
+ void __user *uaddr = (void __user *)(long)attr->addr;
+
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT:
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ if (!dev->kvm->arch.pch_pic) {
+ kvm_err("%s: please create pch_pic irqchip first!\n", __func__);
+ return -ENODEV;
+ }
+
+ return kvm_pch_pic_init(dev, addr);
+ default:
+ kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group,
+ attr->attr);
+ return -EINVAL;
+ }
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
+ return kvm_pch_pic_regs_access(dev, attr, true);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_setup_default_irq_routing(struct kvm *kvm)
+{
+ int i, ret;
+ u32 nr = KVM_IRQCHIP_NUM_PINS;
+ struct kvm_irq_routing_entry *entries;
+
+ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < nr; i++) {
+ entries[i].gsi = i;
+ entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries[i].u.irqchip.irqchip = 0;
+ entries[i].u.irqchip.pin = i;
+ }
+ ret = kvm_set_irq_routing(kvm, entries, nr, 0);
+ kfree(entries);
+
+ return ret;
+}
+
+static int kvm_pch_pic_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ struct kvm *kvm = dev->kvm;
+ struct loongarch_pch_pic *s;
+
+ /* pch pic should not have been created */
+ if (kvm->arch.pch_pic)
+ return -EINVAL;
+
+ ret = kvm_setup_default_irq_routing(kvm);
+ if (ret)
+ return -ENOMEM;
+
+ s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+ kvm->arch.pch_pic = s;
+
+ return 0;
+}
+
+static void kvm_pch_pic_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_pch_pic *s;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.pch_pic)
+ return;
+
+ kvm = dev->kvm;
+ s = kvm->arch.pch_pic;
+ /* unregister pch pic device and free its memory */
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
+ kfree(s);
+}
+
+static struct kvm_device_ops kvm_pch_pic_dev_ops = {
+ .name = "kvm-loongarch-pch-pic",
+ .create = kvm_pch_pic_create,
+ .destroy = kvm_pch_pic_destroy,
+ .set_attr = kvm_pch_pic_set_attr,
+ .get_attr = kvm_pch_pic_get_attr,
+};
+
+int kvm_loongarch_register_pch_pic_device(void)
+{
+ return kvm_register_device_ops(&kvm_pch_pic_dev_ops, KVM_DEV_TYPE_LOONGARCH_PCHPIC);
+}
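pch_pic_read_reg()/pch_pic_write_reg() above encode the convention that each 64-bit pch pic register is exposed to the guest as two 32-bit halves. A standalone sketch of the same convention (an illustrative copy for clarity, not the kernel helpers themselves):

#include <stdint.h>

static uint32_t reg64_read32(uint64_t reg, int high)
{
	return high ? (uint32_t)(reg >> 32) : (uint32_t)reg;
}

/* Replace one 32-bit half and return the half that was overwritten */
static uint32_t reg64_write32(uint64_t *reg, int high, uint32_t v)
{
	uint64_t old = *reg;

	if (high)
		*reg = (old & 0xffffffffULL) | ((uint64_t)v << 32);
	else
		*reg = (old & ~0xffffffffULL) | v;

	return high ? (uint32_t)(old >> 32) : (uint32_t)old;
}

Returning the replaced half is what lets the mask-write path above compute, in one step, which irqs were newly unmasked (old & ~data) or newly masked (~old & data) before updating them in batch.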
diff --git a/arch/loongarch/kvm/irqfd.c b/arch/loongarch/kvm/irqfd.c
new file mode 100644
index 000000000000..9a39627aecf0
--- /dev/null
+++ b/arch/loongarch/kvm/irqfd.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+#include <asm/kvm_pch_pic.h>
+
+static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ /* PCH-PIC pin (0 ~ 63) <---> GSI (0 ~ 63) */
+ pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
+
+ return 0;
+}
+
+/*
+ * kvm_set_msi: inject the MSI corresponding to the
+ * MSI routing entry
+ *
+ * This is the entry point for irqfd MSI injection
+ * and userspace MSI injection.
+ */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ if (!level)
+ return -1;
+
+ pch_msi_set_irq(kvm, e->msi.data, level);
+
+ return 0;
+}
+
+/*
+ * kvm_set_routing_entry: populate a kvm routing entry
+ * from a user routing entry
+ *
+ * @kvm: the VM this entry is applied to
+ * @e: kvm kernel routing entry handle
+ * @ue: user api routing entry handle
+ * return 0 on success, -EINVAL on errors.
+ */
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ e->set = kvm_set_pic_irq;
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ e->irqchip.pin = ue->u.irqchip.pin;
+
+ if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
+ return -EINVAL;
+
+ return 0;
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
+ return 0;
+ case KVM_IRQ_ROUTING_MSI:
+ pch_msi_set_irq(kvm, e->msi.data, level);
+ return 0;
+ default:
+ return -EWOULDBLOCK;
+ }
+}
+
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return kvm_arch_irqchip_in_kernel(kvm);
+}
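With these routing hooks in place, an MSI raised from userspace ends up in kvm_set_msi() and then pch_msi_set_irq(). A hypothetical userspace snippet (the helper name and values are illustrative, not from this patch) exercising that path via the generic KVM_SIGNAL_MSI ioctl:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Inject an MSI whose data field is interpreted as the eiointc irq number */
static int inject_msi(int vm_fd, unsigned int irq)
{
	struct kvm_msi msi = {
		.address_lo = 0,	/* address is not used by the LoongArch handler */
		.address_hi = 0,
		.data       = irq,	/* kvm_set_msi() forwards data as the irq number */
	};

	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}

For line interrupts routed through the PCH-PIC, userspace would instead attach an eventfd to a GSI with KVM_IRQFD, which reaches kvm_set_pic_irq() through the default routing table built by kvm_setup_default_irq_routing().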
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 86a2f2d0cb27..80ea63d465b8 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -9,6 +9,8 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
#include "trace.h"
unsigned long vpid_mask;
@@ -242,6 +244,25 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
+ kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+
+ /*
+ * LLBCTL is a guest CSR register separate from the host one. A general
+ * exception ERET instruction clears the host LLBCTL register in
+ * host mode, and clears the guest LLBCTL register in guest mode.
+ * ERET in the tlb refill exception does not clear the LLBCTL register.
+ *
+ * When the secondary mmu mapping is changed, the guest OS has no way
+ * of knowing that the content behind the mapping may have changed.
+ *
+ * So clear WCLLB of the guest LLBCTL register here when the mapping is
+ * changed. Otherwise, if the mmu mapping is changed while the guest is
+ * executing an LL/SC pair, LL loads from the old address and sets
+ * the LLBCTL flag, then SC checks the LLBCTL flag and stores to the
+ * new address successfully since LLBCTL_WCLLB is on, even if the
+ * memory at the new address has been changed by other VCPUs.
+ */
+ set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
}
/* Restore GSTAT(0x50).vpid */
@@ -260,7 +281,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -ENOIOCTLCMD;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
unsigned long env, gcfg = 0;
@@ -275,16 +296,16 @@ int kvm_arch_hardware_enable(void)
/*
* Enable virtualization features granting guest direct control of
* certain features:
- * GCI=2: Trap on init or unimplement cache instruction.
+ * GCI=2: Trap on init or unimplemented cache instruction.
* TORU=0: Trap on Root Unimplement.
* CACTRL=1: Root control cache.
- * TOP=0: Trap on Previlege.
+ * TOP=0: Trap on Privilege.
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
- if (env & CSR_GCFG_GCIP_ALL)
+ if (env & CSR_GCFG_GCIP_SECURE)
gcfg |= CSR_GCFG_GCI_SECURE;
- if (env & CSR_GCFG_MATC_ROOT)
+ if (env & CSR_GCFG_MATP_ROOT)
gcfg |= CSR_GCFG_MATC_ROOT;
write_csr_gcfg(gcfg);
@@ -296,10 +317,17 @@ int kvm_arch_hardware_enable(void)
kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+ /*
+ * HW Guest CSR registers are lost after CPU suspend and resume.
+ * Clear last_vcpu so that Guest CSR registers are forced to reload
+ * from vCPU SW state.
+ */
+ this_cpu_ptr(vmcs)->last_vcpu = NULL;
+
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
write_csr_gcfg(0);
write_csr_gstat(0);
@@ -312,7 +340,7 @@ void kvm_arch_hardware_disable(void)
static int kvm_loongarch_env_init(void)
{
- int cpu, order;
+ int cpu, order, ret;
void *addr;
struct kvm_context *context;
@@ -366,8 +394,22 @@ static int kvm_loongarch_env_init(void)
}
kvm_init_gcsr_flag();
+ kvm_register_perf_callbacks(NULL);
- return 0;
+ /* Register LoongArch IPI interrupt controller interface. */
+ ret = kvm_loongarch_register_ipi_device();
+ if (ret)
+ return ret;
+
+ /* Register LoongArch EIOINTC interrupt controller interface. */
+ ret = kvm_loongarch_register_eiointc_device();
+ if (ret)
+ return ret;
+
+ /* Register LoongArch PCH-PIC interrupt controller interface. */
+ ret = kvm_loongarch_register_pch_pic_device();
+
+ return ret;
}
static void kvm_loongarch_env_exit(void)
@@ -384,6 +426,8 @@ static void kvm_loongarch_env_exit(void)
}
kfree(kvm_loongarch_ops);
}
+
+ kvm_unregister_perf_callbacks();
}
static int kvm_loongarch_init(void)
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index a556cff35740..ed956c5cf2cc 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
child = kvm_mmu_memory_cache_alloc(cache);
_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
+ smp_wmb(); /* Make pte visible before pmd */
kvm_set_pte(entry, __pa(child));
} else if (kvm_pte_huge(*entry)) {
return entry;
@@ -444,6 +445,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
int needs_flush;
+ u32 old_flags = old ? old->flags : 0;
+ u32 new_flags = new ? new->flags : 0;
+ bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+
+ /* Only track memslot flags changed */
+ if (change != KVM_MR_FLAGS_ONLY)
+ return;
+
+ /* Discard dirty page tracking on readonly memslot */
+ if ((old_flags & new_flags) & KVM_MEM_READONLY)
+ return;
/*
* If dirty page logging is enabled, write protect all pages in the slot
@@ -454,9 +466,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
*/
- if (change == KVM_MR_FLAGS_ONLY &&
- (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+ if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) {
+ /*
+ * Initially-all-set does not require write protecting any page
+ * because they're all assumed to be dirty.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
+
spin_lock(&kvm->mmu_lock);
/* Write protect GPA page table entries */
needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
@@ -494,38 +511,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
range->end << PAGE_SHIFT, &ctx);
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- unsigned long prot_bits;
- kvm_pte_t *ptep;
- kvm_pfn_t pfn = pte_pfn(range->arg.pte);
- gpa_t gpa = range->start << PAGE_SHIFT;
-
- ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
- if (!ptep)
- return false;
-
- /* Replacing an absent or old page doesn't need flushes */
- if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) {
- kvm_set_pte(ptep, 0);
- return false;
- }
-
- /* Fill new pte if write protected or page migrated */
- prot_bits = _PAGE_PRESENT | __READABLE;
- prot_bits |= _CACHE_MASK & pte_val(range->arg.pte);
-
- /*
- * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support
- * _PAGE_WRITE for map_page_fast if next page write fault
- * _PAGE_DIRTY since gpa has already recorded as dirty page
- */
- prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte);
- kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits)));
-
- return true;
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_ptw_ctx ctx;
@@ -567,7 +552,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
int ret = 0;
- kvm_pfn_t pfn = 0;
kvm_pte_t *ptep, changed, new;
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
@@ -583,11 +567,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
/* Track access to pages marked old */
- new = *ptep;
- if (!kvm_pte_young(new))
- new = kvm_pte_mkyoung(new);
- /* call kvm_set_pfn_accessed() after unlock */
-
+ new = kvm_pte_mkyoung(*ptep);
if (write && !kvm_pte_dirty(new)) {
if (!kvm_pte_write(new)) {
ret = -EFAULT;
@@ -611,23 +591,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
changed = new ^ (*ptep);
- if (changed) {
+ if (changed)
kvm_set_pte(ptep, new);
- pfn = kvm_pte_pfn(new);
- }
- spin_unlock(&kvm->mmu_lock);
- /*
- * Fixme: pfn may be freed after mmu_lock
- * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
- */
- if (kvm_pte_young(changed))
- kvm_set_pfn_accessed(pfn);
+ spin_unlock(&kvm->mmu_lock);
- if (kvm_pte_dirty(changed)) {
+ if (kvm_pte_dirty(changed))
mark_page_dirty(kvm, gfn);
- kvm_set_pfn_dirty(pfn);
- }
+
return ret;
out:
spin_unlock(&kvm->mmu_lock);
@@ -727,19 +698,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
- pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+ pgd = pgdp_get(pgd_offset(kvm->mm, hva));
if (pgd_none(pgd))
goto out;
- p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+ p4d = p4dp_get(p4d_offset(&pgd, hva));
if (p4d_none(p4d) || !p4d_present(p4d))
goto out;
- pud = READ_ONCE(*pud_offset(&p4d, hva));
+ pud = pudp_get(pud_offset(&p4d, hva));
if (pud_none(pud) || !pud_present(pud))
goto out;
- pmd = READ_ONCE(*pmd_offset(&pud, hva));
+ pmd = pmdp_get(pmd_offset(&pud, hva));
if (pmd_none(pmd) || !pmd_present(pmd))
goto out;
@@ -769,6 +740,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g
val += PAGE_SIZE;
}
+ smp_wmb(); /* Make pte visible before pmd */
/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
kvm_set_pte(ptep, __pa(child));
@@ -808,6 +780,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
struct kvm *kvm = vcpu->kvm;
struct kvm_memory_slot *memslot;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct page *page;
/* Try the fast path to handle old / clean pages */
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -835,7 +808,7 @@ retry:
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
@@ -847,7 +820,7 @@ retry:
smp_rmb();
/* Slow path - ask KVM core whether we can access this GPA */
- pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
+ pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page);
if (is_error_noslot_pfn(pfn)) {
err = -EFAULT;
goto out;
@@ -859,10 +832,10 @@ retry:
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
- * gfn_to_pfn_prot().
+ * kvm_faultin_pfn().
*/
spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(pfn);
+ kvm_release_page_unused(page);
if (retry_no > 100) {
retry_no = 0;
schedule();
@@ -890,11 +863,21 @@ retry:
/* Disable dirty logging on HugePages */
level = 0;
- if (!fault_supports_huge_mapping(memslot, hva, write)) {
- level = 0;
- } else {
+ if (fault_supports_huge_mapping(memslot, hva, write)) {
+ /* Check page level of the host mmu */
level = host_pfn_mapping_level(kvm, gfn, memslot);
if (level == 1) {
+ /*
+ * Check the page level of the secondary mmu.
+ * Disable hugepage if it is already mapped as a
+ * normal page in the secondary mmu.
+ */
+ ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
+ if (ptep && !kvm_pte_huge(*ptep))
+ level = 0;
+ }
+
+ if (level == 1) {
gfn = gfn & ~(PTRS_PER_PTE - 1);
pfn = pfn & ~(PTRS_PER_PTE - 1);
}
@@ -917,21 +900,19 @@ retry:
else
++kvm->stat.pages;
kvm_set_pte(ptep, new_pte);
+
+ kvm_release_faultin_page(kvm, page, false, writeable);
spin_unlock(&kvm->mmu_lock);
- if (prot_bits & _PAGE_DIRTY) {
+ if (prot_bits & _PAGE_DIRTY)
mark_page_dirty_in_slot(kvm, memslot, gfn);
- kvm_set_pfn_dirty(pfn);
- }
- kvm_set_pfn_accessed(pfn);
- kvm_release_pfn_clean(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
}
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode)
{
int ret;
@@ -940,7 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
- kvm_flush_tlb_gpa(vcpu, gpa);
+ if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) {
+ /*
+ * With HW PTW, no invalid TLB entry is installed on a page fault,
+ * but for an EXCCODE_TLBM exception a stale entry may still exist
+ * because of the preceding read access.
+ *
+ * With SW PTW, an invalid TLB entry is installed by the TLB refill
+ * exception handler.
+ */
+ vcpu->arch.flush_gpa = gpa;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ }
return 0;
}
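The hunk above replaces the synchronous kvm_flush_tlb_gpa() call with a deferred request: the fault handler only records the GPA and raises KVM_REQ_TLB_FLUSH_GPA, and the flush itself is performed later with interrupts disabled, right before guest entry (see kvm_late_check_requests() further down in vcpu.c). Below is a minimal user-space sketch of that record-then-consume pattern; all names here (vcpu_model, REQ_TLB_FLUSH_GPA, do_flush_gpa) are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

#define INVALID_GPA        (~0UL)
#define REQ_TLB_FLUSH_GPA  (1U << 0)

struct vcpu_model {
	unsigned int requests;      /* pending request bits */
	unsigned long flush_gpa;    /* GPA recorded by the fault handler */
};

static void do_flush_gpa(unsigned long gpa)
{
	printf("flush GPA 0x%lx\n", gpa);   /* stands in for the invtlb step */
}

/* Fault-handler side: record the GPA and raise the request. */
static void request_gpa_flush(struct vcpu_model *v, unsigned long gpa)
{
	v->flush_gpa = gpa;
	v->requests |= REQ_TLB_FLUSH_GPA;
}

/* Entry side: with IRQs (conceptually) disabled, consume the request. */
static void late_check_requests(struct vcpu_model *v)
{
	if (v->requests & REQ_TLB_FLUSH_GPA) {
		v->requests &= ~REQ_TLB_FLUSH_GPA;
		if (v->flush_gpa != INVALID_GPA) {
			do_flush_gpa(v->flush_gpa);
			v->flush_gpa = INVALID_GPA;
		}
	}
}

int main(void)
{
	struct vcpu_model v = { .requests = 0, .flush_gpa = INVALID_GPA };

	request_gpa_flush(&v, 0x1000);
	late_check_requests(&v);   /* flushes exactly once */
	late_check_requests(&v);   /* nothing left to do */
	return 0;
}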
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 80e988985a6a..f1768b7a6194 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -60,16 +60,8 @@
ld.d t0, a2, KVM_ARCH_GPC
csrwr t0, LOONGARCH_CSR_ERA
- /* Save host PGDL */
- csrrd t0, LOONGARCH_CSR_PGDL
- st.d t0, a2, KVM_ARCH_HPGD
-
- /* Switch to kvm */
- ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
-
- /* Load guest PGDL */
- li.w t0, KVM_GPGD
- ldx.d t0, t1, t0
+ /* Load PGD for KVM hypervisor */
+ ld.d t0, a2, KVM_ARCH_KVMPGD
csrwr t0, LOONGARCH_CSR_PGDL
/* Mix GID and RID */
@@ -85,7 +77,7 @@
* Guest CRMD comes from separate GCSR_CRMD register
*/
ori t0, zero, CSR_PRMD_PIE
- csrxchg t0, t0, LOONGARCH_CSR_PRMD
+ csrwr t0, LOONGARCH_CSR_PRMD
/* Set PVM bit to setup ertn to guest context */
ori t0, zero, CSR_GSTAT_PVM
@@ -277,6 +269,10 @@ SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index bcc6b6d063d9..32dc213374be 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -161,10 +161,11 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
if (kvm_vcpu_is_blocking(vcpu)) {
/*
- * HRTIMER_MODE_PINNED is suggested since vcpu may run in
- * the same physical cpu in next time
+ * HRTIMER_MODE_PINNED_HARD is preferred since the vcpu is likely to
+ * run on the same physical cpu next time, and the timer should run
+ * in hardirq context even in the PREEMPT_RT case.
*/
- hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
}
}
@@ -188,10 +189,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
preempt_enable();
}
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
- write_gcsr_timercfg(0);
- kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
- hrtimer_cancel(&vcpu->arch.swtimer);
-}
diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c
index 02535df6b51f..ebdbe9264e9c 100644
--- a/arch/loongarch/kvm/tlb.c
+++ b/arch/loongarch/kvm/tlb.c
@@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
- unsigned long flags;
-
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
gpa &= (PAGE_MASK << 1);
invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
- local_irq_restore(flags);
}
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index c2484ad4cffa..1783397b1bc8 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -19,14 +19,16 @@ DECLARE_EVENT_CLASS(kvm_transition,
TP_PROTO(struct kvm_vcpu *vcpu),
TP_ARGS(vcpu),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned long, pc)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
),
- TP_printk("PC: 0x%08lx", __entry->pc)
+ TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc)
);
DEFINE_EVENT(kvm_transition, kvm_enter,
@@ -54,19 +56,22 @@ DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned long, pc)
__field(unsigned int, reason)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
__entry->reason = reason;
),
- TP_printk("[%s]PC: 0x%08lx",
- __print_symbolic(__entry->reason,
- kvm_trace_symbol_exit_types),
- __entry->pc)
+ TP_printk("vcpu %u [%s] PC: 0x%08lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
+ __entry->pc)
);
DEFINE_EVENT(kvm_exit, kvm_exit_idle,
@@ -85,14 +90,17 @@ TRACE_EVENT(kvm_exit_gspr,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
TP_ARGS(vcpu, inst_word),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned int, inst_word)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->inst_word = inst_word;
),
- TP_printk("Inst word: 0x%08x", __entry->inst_word)
+ TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id,
+ __entry->inst_word)
);
#define KVM_TRACE_AUX_SAVE 0
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 3a8779065f73..5af32ec62cb1 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -19,6 +20,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, hypercall_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -30,6 +32,170 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
+static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ context->perf_cntr[0] = read_csr_perfcntr0();
+ context->perf_cntr[1] = read_csr_perfcntr1();
+ context->perf_cntr[2] = read_csr_perfcntr2();
+ context->perf_cntr[3] = read_csr_perfcntr3();
+ context->perf_ctrl[0] = write_csr_perfctrl0(0);
+ context->perf_ctrl[1] = write_csr_perfctrl1(0);
+ context->perf_ctrl[2] = write_csr_perfctrl2(0);
+ context->perf_ctrl[3] = write_csr_perfctrl3(0);
+}
+
+static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ write_csr_perfcntr0(context->perf_cntr[0]);
+ write_csr_perfcntr1(context->perf_cntr[1]);
+ write_csr_perfcntr2(context->perf_cntr[2]);
+ write_csr_perfcntr3(context->perf_cntr[3]);
+ write_csr_perfctrl0(context->perf_ctrl[0]);
+ write_csr_perfctrl1(context->perf_ctrl[1]);
+ write_csr_perfctrl2(context->perf_ctrl[2]);
+ write_csr_perfctrl3(context->perf_ctrl[3]);
+}
+
+
+static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ if (!kvm_guest_has_pmu(&vcpu->arch))
+ return -EINVAL;
+
+ kvm_save_host_pmu(vcpu);
+
+ /* Set PM0-PM(num) to guest */
+ val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+ val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+ write_csr_gcfg(val);
+
+ kvm_restore_guest_pmu(vcpu);
+
+ return 0;
+}
+
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ return;
+
+ kvm_save_guest_pmu(vcpu);
+
+ /* Disable pmu access from guest */
+ write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+ /*
+ * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
+ * exiting the guest, so that the PMU CSR context need not be
+ * switched on the next trap into the guest.
+ */
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+ if (!(val & KVM_PMU_EVENT_ENABLED))
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+
+ kvm_restore_host_pmu(vcpu);
+}
+
+static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+}
+
+static void kvm_check_pmu(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
+ kvm_own_pmu(vcpu);
+ vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
+ }
+}
+
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+ u32 version;
+ u64 steal;
+ gpa_t gpa;
+ struct kvm_memslots *slots;
+ struct kvm_steal_time __user *st;
+ struct gfn_to_hva_cache *ghc;
+
+ ghc = &vcpu->arch.st.cache;
+ gpa = vcpu->arch.st.guest_addr;
+ if (!(gpa & KVM_STEAL_PHYS_VALID))
+ return;
+
+ gpa &= KVM_STEAL_PHYS_MASK;
+ slots = kvm_memslots(vcpu->kvm);
+ if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+ ghc->gpa = INVALID_GPA;
+ return;
+ }
+ }
+
+ st = (struct kvm_steal_time __user *)ghc->hva;
+ unsafe_get_user(version, &st->version, out);
+ if (version & 1)
+ version += 1; /* first time write, random junk */
+
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+ smp_wmb();
+
+ unsafe_get_user(steal, &st->steal, out);
+ steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ unsafe_put_user(steal, &st->steal, out);
+
+ smp_wmb();
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+out:
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
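kvm_update_stolen_time() above follows the usual pvtime convention: the version field is bumped to an odd value before the payload is touched and back to an even value afterwards, with write barriers in between. Below is a user-space sketch of the matching guest-side reader loop; the record layout and names are illustrative assumptions, not the published ABI.

#include <stdint.h>
#include <stdio.h>

struct steal_time_rec {
	volatile uint32_t version;  /* odd while the host is updating */
	volatile uint64_t steal;    /* accumulated steal time in ns */
};

static uint64_t read_steal(const struct steal_time_rec *st)
{
	uint32_t before, after;
	uint64_t steal;

	do {
		before = st->version;
		__sync_synchronize();        /* pairs with the host's smp_wmb() */
		steal = st->steal;
		__sync_synchronize();
		after = st->version;
	} while ((before & 1) || before != after);  /* retry on a torn update */

	return steal;
}

int main(void)
{
	struct steal_time_rec st = { .version = 2, .steal = 12345 };

	printf("steal = %llu ns\n", (unsigned long long)read_steal(&st));
	return 0;
}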
+
/*
* kvm_check_requests - check and handle pending vCPU requests
*
@@ -47,9 +213,22 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
if (kvm_dirty_ring_check_request(vcpu))
return RESUME_HOST;
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ kvm_update_stolen_time(vcpu);
+
return RESUME_GUEST;
}
+static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
+ if (vcpu->arch.flush_gpa != INVALID_GPA) {
+ kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
+ vcpu->arch.flush_gpa = INVALID_GPA;
+ }
+}
+
/*
* Check and handle pending signal and vCPU requests etc
* Run with irq enabled and preempt enabled
@@ -61,7 +240,7 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
*/
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
- int ret;
+ int idx, ret;
/*
* Check conditions before entering the guest
@@ -70,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
if (ret < 0)
return ret;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_check_requests(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
@@ -100,11 +281,20 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
+ kvm_check_pmu(vcpu);
+
+ /*
+ * Called after kvm_check_vpid(), since that updates CSR.GSTAT,
+ * which is used by kvm_flush_tlb_gpa(), and it may also clear
+ * the pending KVM_REQ_TLB_FLUSH_GPA bit
+ */
+ kvm_late_check_requests(vcpu);
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+ kvm_lose_pmu(vcpu);
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@@ -122,7 +312,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret = RESUME_GUEST;
unsigned long estat = vcpu->arch.host_estat;
- u32 intr = estat & 0x1fff; /* Ignore NMI */
+ u32 intr = estat & CSR_ESTAT_IS;
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -130,6 +320,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_lose_pmu(vcpu);
+
guest_timing_exit_irqoff();
guest_state_exit_irqoff();
local_irq_enable();
@@ -170,6 +362,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
+ unsigned long val;
+
+ preempt_disable();
+ val = gcsr_read(LOONGARCH_CSR_CRMD);
+ preempt_enable();
+
+ return (val & CSR_PRMD_PPLV) == PLV_KERN;
+}
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For LoongArch64, if the PMU is not passed through to the VM,
+ * any event that arrives while a vCPU is loaded is considered to be "in guest".
+ */
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
+}
+#endif
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
return false;
}
@@ -247,7 +467,101 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- return -EINVAL;
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
+
+ if (dbg->control & KVM_GUESTDBG_ENABLE)
+ vcpu->guest_debug = dbg->control;
+ else
+ vcpu->guest_debug = 0;
+
+ return 0;
+}
+
+static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
+{
+ int cpuid;
+ struct kvm_phyid_map *map;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (val >= KVM_MAX_PHYID)
+ return -EINVAL;
+
+ map = vcpu->kvm->arch.phyid_map;
+ cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
+
+ spin_lock(&vcpu->kvm->arch.phyid_map_lock);
+ if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
+ /* Discard duplicated CPUID set operation */
+ if (cpuid == val) {
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return 0;
+ }
+
+ /*
+ * CPUID has already been set; forbid changing it to a
+ * different value at runtime
+ */
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return -EINVAL;
+ }
+
+ if (map->phys_map[val].enabled) {
+ /* Discard duplicated CPUID set operation */
+ if (vcpu == map->phys_map[val].vcpu) {
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return 0;
+ }
+
+ /*
+ * The new CPUID is already in use by another vcpu; forbid
+ * sharing the same CPUID between different vcpus
+ */
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return -EINVAL;
+ }
+
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
+ map->phys_map[val].enabled = true;
+ map->phys_map[val].vcpu = vcpu;
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+
+ return 0;
+}
+
+static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
+{
+ int cpuid;
+ struct kvm_phyid_map *map;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ map = vcpu->kvm->arch.phyid_map;
+ cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
+
+ if (cpuid >= KVM_MAX_PHYID)
+ return;
+
+ spin_lock(&vcpu->kvm->arch.phyid_map_lock);
+ if (map->phys_map[cpuid].enabled) {
+ map->phys_map[cpuid].vcpu = NULL;
+ map->phys_map[cpuid].enabled = false;
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
+ }
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+}
+
+struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
+{
+ struct kvm_phyid_map *map;
+
+ if (cpuid >= KVM_MAX_PHYID)
+ return NULL;
+
+ map = kvm->arch.phyid_map;
+ if (!map->phys_map[cpuid].enabled)
+ return NULL;
+
+ return map->phys_map[cpuid].vcpu;
}
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
@@ -259,6 +573,17 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
return -EINVAL;
if (id == LOONGARCH_CSR_ESTAT) {
+ preempt_disable();
+ vcpu_load(vcpu);
+ /*
+ * Sync pending interrupts into ESTAT so that they are not
+ * lost during the VM migration stage
+ */
+ kvm_deliver_intr(vcpu);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
+ vcpu_put(vcpu);
+ preempt_enable();
+
/* ESTAT IP0~IP7 get from GINTC */
gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
@@ -282,6 +607,9 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
if (get_gcsr_flag(id) & INVALID_GCSR)
return -EINVAL;
+ if (id == LOONGARCH_CSR_CPUID)
+ return kvm_set_cpuid(vcpu, val);
+
if (id == LOONGARCH_CSR_ESTAT) {
/* ESTAT IP0~IP7 inject through GINTC */
gintc = (val >> 2) & 0xff;
@@ -295,6 +623,22 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
kvm_write_sw_gcsr(csr, id, val);
+ /*
+ * After modifying the vcpu's PMU CSR values, set KVM_REQ_PMU
+ * if the PMU CSRs are in use.
+ */
+ if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
+ unsigned long val;
+
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+
+ if (val & KVM_PMU_EVENT_ENABLED)
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
+
return ret;
}
@@ -324,6 +668,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v |= CPUCFG2_LSX;
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
+ if (cpu_has_lbt_x86)
+ *v |= CPUCFG2_X86BT;
+ if (cpu_has_lbt_arm)
+ *v |= CPUCFG2_ARMBT;
+ if (cpu_has_lbt_mips)
+ *v |= CPUCFG2_MIPSBT;
return 0;
case LOONGARCH_CPUCFG3:
@@ -333,6 +683,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
case LOONGARCH_CPUCFG5:
*v = GENMASK(31, 0);
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (cpu_has_pmp)
+ *v = GENMASK(14, 0);
+ else
+ *v = 0;
+ return 0;
case LOONGARCH_CPUCFG16:
*v = GENMASK(16, 0);
return 0;
@@ -377,6 +733,17 @@ static int kvm_check_cpucfg(int id, u64 val)
/* LASX architecturally implies LSX and FP but val does not satisfy that */
return -EINVAL;
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (val & CPUCFG6_PMP) {
+ u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
+ if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+ return -EINVAL;
+ if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+ return -EINVAL;
+ if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+ return -EINVAL;
+ }
+ return 0;
default:
/*
* Values for the other CPUCFG IDs are not being further validated
@@ -404,11 +771,42 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
else
ret = -EINVAL;
break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ *v = vcpu->arch.lbt.scr0;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ *v = vcpu->arch.lbt.scr1;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ *v = vcpu->arch.lbt.scr2;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ *v = vcpu->arch.lbt.scr3;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ *v = vcpu->arch.lbt.eflags;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ *v = vcpu->arch.fpu.ftop;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
*v = drdtime() + vcpu->kvm->arch.time_offset;
break;
+ case KVM_REG_LOONGARCH_DEBUG_INST:
+ *v = INSN_HVCL | KVM_HCALL_SWDBG;
+ break;
default:
ret = -EINVAL;
break;
@@ -459,6 +857,37 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
if (ret)
break;
vcpu->arch.cpucfg[id] = (u32)v;
+ if (id == LOONGARCH_CPUCFG6)
+ vcpu->arch.max_pmu_csrid =
+ LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
+ break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ vcpu->arch.lbt.scr0 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ vcpu->arch.lbt.scr1 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ vcpu->arch.lbt.scr2 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ vcpu->arch.lbt.scr3 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ vcpu->arch.lbt.eflags = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ vcpu->arch.fpu.ftop = v;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
@@ -471,9 +900,16 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
- kvm_reset_timer(vcpu);
+ vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+ /*
+ * When the vCPU is reset, clear the ESTAT and GINTC registers.
+ * The other CSR registers are cleared via _kvm_setcsr().
+ */
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;
@@ -552,7 +988,10 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
switch (attr->attr) {
- case 2:
+ case LOONGARCH_CPUCFG2:
+ case LOONGARCH_CPUCFG6:
+ return 0;
+ case CPUCFG_KVM_FEATURE:
return 0;
default:
return -ENXIO;
@@ -561,6 +1000,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -570,6 +1019,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -577,22 +1029,48 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
-static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret = 0;
uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
- ret = _kvm_get_cpucfg_mask(attr->attr, &val);
- if (ret)
- return ret;
+ switch (attr->attr) {
+ case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
+ ret = _kvm_get_cpucfg_mask(attr->attr, &val);
+ if (ret)
+ return ret;
+ break;
+ case CPUCFG_KVM_FEATURE:
+ val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+ break;
+ default:
+ return -ENXIO;
+ }
put_user(val, uaddr);
return ret;
}
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 gpa;
+ u64 __user *user = (u64 __user *)attr->addr;
+
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ gpa = vcpu->arch.st.guest_addr;
+ if (put_user(gpa, user))
+ return -EFAULT;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -600,7 +1078,10 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_LOONGARCH_VCPU_CPUCFG:
- ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+ ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+ break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
break;
default:
break;
@@ -612,7 +1093,65 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- return -ENXIO;
+ u64 val, valid;
+ u64 __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (attr->attr) {
+ case CPUCFG_KVM_FEATURE:
+ if (get_user(val, user))
+ return -EFAULT;
+
+ valid = LOONGARCH_PV_FEAT_MASK;
+ if (val & ~valid)
+ return -EINVAL;
+
+ /* All vCPUs must set the same PV features */
+ if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
+ && ((kvm->arch.pv_features & valid) != val))
+ return -EINVAL;
+ kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int idx, ret = 0;
+ u64 gpa, __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ if (get_user(gpa, user))
+ return -EFAULT;
+
+ if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return -EINVAL;
+
+ if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+ vcpu->arch.st.guest_addr = gpa;
+ return 0;
+ }
+
+ /* Check the address is in a valid memslot */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+ ret = -EINVAL;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (!ret) {
+ vcpu->arch.st.guest_addr = gpa;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+
+ return ret;
}
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
@@ -624,6 +1163,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -728,12 +1270,66 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return 0;
}
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -EINVAL;
+
+ preempt_disable();
+ set_csr_euen(CSR_EUEN_LBTEN);
+ _restore_lbt(&vcpu->arch.lbt);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ preempt_enable();
+
+ return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+ _save_lbt(&vcpu->arch.lbt);
+ clear_csr_euen(CSR_EUEN_LBTEN);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+ }
+ preempt_enable();
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+ /*
+ * If TM is enabled, saving/restoring the TOP register will
+ * trigger an LBT exception, so enable LBT here in advance
+ */
+ if (fcsr & FPU_CSR_TM)
+ kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
+ return;
+ kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
+ }
+}
+#else
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
+static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
+#endif
+
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- /* Enable FPU */
+ /*
+ * Enable FPU for guest
+ * Set FR and FRE according to guest context
+ */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN);
kvm_restore_fpu(&vcpu->arch.fpu);
@@ -753,6 +1349,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
preempt_disable();
/* Enable LSX for guest */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
case KVM_LARCH_FPU:
@@ -787,6 +1384,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
preempt_disable();
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
case KVM_LARCH_LSX:
@@ -818,6 +1416,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
+ kvm_check_fcsr_alive(vcpu);
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
kvm_save_lasx(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -840,6 +1439,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable FPU */
clear_csr_euen(CSR_EUEN_FPEN);
}
+ kvm_lose_lbt(vcpu);
preempt_enable();
}
@@ -893,9 +1493,19 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct loongarch_csrs *csr;
vcpu->arch.vpid = 0;
+ vcpu->arch.flush_gpa = INVALID_GPA;
+
- hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
+ hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_HARD);
+ /* Get GPA (=HVA) of PGD for kvm hypervisor */
+ vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
+
+ /*
+ * Get the PGD for the primary mmu; the virtual address is used since
+ * there are memory accesses after loading from CSR_PGD in the TLB
+ * exception fast path.
+ */
+ vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
vcpu->arch.handle_exit = kvm_handle_exit;
vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
@@ -912,6 +1522,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Init */
vcpu->arch.last_sched_cpu = -1;
+ /* Init ipi_state lock */
+ spin_lock_init(&vcpu->arch.ipi_state.lock);
+
/*
* Initialize guest register state to valid architectural reset state.
*/
@@ -924,6 +1537,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Set cpuid */
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
/* Start with no pending virtual guest interrupts */
csr->csrs[LOONGARCH_CSR_GINTC] = 0;
@@ -942,6 +1556,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
hrtimer_cancel(&vcpu->arch.swtimer);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_drop_cpuid(vcpu);
kfree(vcpu->arch.csr);
/*
@@ -978,9 +1593,10 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Restore timer state regardless */
kvm_restore_timer(vcpu);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
- /* Control guest page CCA attribute */
- change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+ /* Restore hardware PMU CSRs */
+ kvm_restore_pmu(vcpu);
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
@@ -1158,12 +1774,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mmio_needed = 0;
}
- if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_HYPERCALL:
+ kvm_complete_user_service(vcpu, run);
+ break;
+ case KVM_EXIT_LOONGARCH_IOCSR:
if (!run->iocsr_io.is_write)
kvm_complete_iocsr_read(vcpu, run);
+ break;
}
- if (run->immediate_exit)
+ if (!vcpu->wants_to_run)
return r;
/* Clear exit_reason */
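Among the vcpu.c changes above, kvm_check_cpucfg() gains a LOONGARCH_CPUCFG6 case that only accepts a guest PMU configuration whose counter width matches the host and whose counter count and user-mode bit do not exceed what the host reports. Below is a small user-space model of that acceptance test; the field positions are made-up placeholders for illustration, the real masks come from asm/loongarch.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; not the real CPUCFG6 encoding. */
#define PMP_BIT     (1u << 0)          /* PMU present */
#define PMBITS_MASK (0x3fu << 1)       /* counter width field */
#define PMNUM_MASK  (0xfu << 7)        /* number of counters - 1 */
#define UPM_BIT     (1u << 11)         /* user-mode access */

static bool cpucfg6_ok(uint32_t requested, uint32_t host)
{
	if (!(requested & PMP_BIT))
		return true;                              /* PMU not requested */
	if ((requested & PMBITS_MASK) != (host & PMBITS_MASK))
		return false;                             /* width must match host */
	if ((requested & PMNUM_MASK) > (host & PMNUM_MASK))
		return false;                             /* cannot exceed host counters */
	if ((requested & UPM_BIT) && !(host & UPM_BIT))
		return false;                             /* user-mode PMU unsupported */
	return true;
}

int main(void)
{
	uint32_t host = PMP_BIT | (0x3fu << 1) | (0x3u << 7) | UPM_BIT;
	uint32_t req  = PMP_BIT | (0x3fu << 1) | (0x7u << 7);   /* asks for too many counters */

	printf("accepted: %s\n", cpucfg6_ok(req, host) ? "yes" : "no");
	return 0;
}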
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index 0a37f6fa8f2d..edccfc8c9cd8 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -5,6 +5,9 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_vcpu.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
@@ -30,8 +33,26 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.pgd)
return -ENOMEM;
+ kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
+ if (!kvm->arch.phyid_map) {
+ free_page((unsigned long)kvm->arch.pgd);
+ kvm->arch.pgd = NULL;
+ return -ENOMEM;
+ }
+ spin_lock_init(&kvm->arch.phyid_map_lock);
+
kvm_init_vmcs(kvm);
- kvm->arch.gpa_size = BIT(cpu_vabits - 1);
+
+ /* Enable all PV features by default */
+ kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
+ if (kvm_pvtime_supported())
+ kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+
+ /*
+ * cpu_vabits covers the user address space only (half of the total).
+ * The GPA size of a VM is the same as the size of the user address space.
+ */
+ kvm->arch.gpa_size = BIT(cpu_vabits);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
@@ -52,6 +73,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_destroy_vcpus(kvm);
free_page((unsigned long)kvm->arch.pgd);
kvm->arch.pgd = NULL;
+ kvfree(kvm->arch.phyid_map);
+ kvm->arch.phyid_map = NULL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -59,6 +82,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
+ case KVM_CAP_IRQCHIP:
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_READONLY_MEM:
@@ -66,6 +90,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_SET_GUEST_DEBUG:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
@@ -88,7 +113,85 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VM_FEAT_LSX:
+ if (cpu_has_lsx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_LASX:
+ if (cpu_has_lasx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_X86BT:
+ if (cpu_has_lbt_x86)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_ARMBT:
+ if (cpu_has_lbt_arm)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_MIPSBT:
+ if (cpu_has_lbt_mips)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PMU:
+ if (cpu_has_pmp)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+ return 0;
+ case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
+ if (kvm_pvtime_supported())
+ return 0;
+ return -ENXIO;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_LOONGARCH_VM_FEAT_CTRL:
+ return kvm_vm_feature_has_attr(kvm, attr);
+ default:
+ return -ENXIO;
+ }
+}
+
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ void __user *argp = (void __user *)arg;
+ struct kvm *kvm = filp->private_data;
+ struct kvm_device_attr attr;
+
+ switch (ioctl) {
+ case KVM_CREATE_IRQCHIP:
+ return 0;
+ case KVM_HAS_DEVICE_ATTR:
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+
+ return kvm_vm_has_attr(kvm, &attr);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
+{
+ if (!kvm_arch_irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+ irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ irq_event->irq, irq_event->level, line_status);
+
+ return 0;
+}
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+ return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
}
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index a77bf160bfc4..fae77809048b 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -6,6 +6,10 @@
lib-y += delay.o memset.o memcpy.o memmove.o \
clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o
+obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o
+
obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_CRC32_ARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c
new file mode 100644
index 000000000000..b37cd8537b45
--- /dev/null
+++ b/arch/loongarch/lib/crc32-loongarch.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CRC32 and CRC32C using LoongArch crc* instructions
+ *
+ * Module based on mips/crypto/crc32-mips.c
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ * Copyright (C) 2018 MIPS Tech, LLC
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <asm/cpu-features.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+#define _CRC32(crc, value, size, type) \
+do { \
+ __asm__ __volatile__( \
+ #type ".w." #size ".w" " %0, %1, %0\n\t"\
+ : "+r" (crc) \
+ : "r" (value) \
+ : "memory"); \
+} while (0)
+
+#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
+#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32_le_base(crc, p, len);
+
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32(crc, value, b);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32c_base(crc, p, len);
+
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32C(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32C(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32C(crc, value, b);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_loongarch_init(void)
+{
+ if (cpu_has_crc32)
+ static_branch_enable(&have_crc32);
+ return 0;
+}
+subsys_initcall(crc32_loongarch_init);
+
+static void __exit crc32_loongarch_exit(void)
+{
+}
+module_exit(crc32_loongarch_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_crc32))
+ return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
+MODULE_LICENSE("GPL v2");
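The byte-tail handling in crc32_le_arch()/crc32c_arch() above works because, once the 8-byte loop exits, len is below 8, so "len & sizeof(u32)", "len & sizeof(u16)" and "len & sizeof(u8)" each test exactly one bit of the remaining length. Below is a stand-alone sketch of the same chunking; mix() is a dummy placeholder, not the crc.w.*.w computation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Dummy mixing step standing in for the crc.w.*.w instructions. */
static uint32_t mix(uint32_t acc, uint64_t value)
{
	return acc ^ (uint32_t)(value * 2654435761u);
}

static uint32_t chunked(const unsigned char *p, size_t len)
{
	uint32_t acc = ~0u;

	while (len >= sizeof(uint64_t)) {       /* 8-byte chunks */
		uint64_t v;
		memcpy(&v, p, sizeof(v));
		acc = mix(acc, v);
		p += sizeof(v);
		len -= sizeof(v);
	}
	/* len < 8 now, so each test below inspects one bit of len. */
	if (len & sizeof(uint32_t)) {
		uint32_t v;
		memcpy(&v, p, sizeof(v));
		acc = mix(acc, v);
		p += sizeof(v);
	}
	if (len & sizeof(uint16_t)) {
		uint16_t v;
		memcpy(&v, p, sizeof(v));
		acc = mix(acc, v);
		p += sizeof(v);
	}
	if (len & sizeof(uint8_t))
		acc = mix(acc, *p);

	return acc;
}

int main(void)
{
	const unsigned char buf[13] = "0123456789ab";

	printf("0x%08x\n", chunked(buf, sizeof(buf)));  /* 8 + 4 + 1 bytes */
	return 0;
}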
diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
index a5e84b403c3b..df309ae4045d 100644
--- a/arch/loongarch/lib/csum.c
+++ b/arch/loongarch/lib/csum.c
@@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
- if (unlikely(len == 0))
+ if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
diff --git a/arch/loongarch/lib/tishift.S b/arch/loongarch/lib/tishift.S
new file mode 100644
index 000000000000..fa1d310012bc
--- /dev/null
+++ b/arch/loongarch/lib/tishift.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/asmmacro.h>
+#include <linux/export.h>
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__ashlti3)
+ srli.d t2, a0, 1
+ nor t3, zero, a2
+ sll.d t1, a1, a2
+ srl.d t2, t2, t3
+ andi t0, a2, 64
+ sll.d a0, a0, a2
+ or t1, t2, t1
+ maskeqz a1, a0, t0
+ masknez a0, a0, t0
+ masknez t0, t1, t0
+ or a1, t0, a1
+ jr ra
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
+
+SYM_FUNC_START(__ashrti3)
+ nor t3, zero, a2
+ slli.d t2, a1, 1
+ srl.d t1, a0, a2
+ sll.d t2, t2, t3
+ andi t0, a2, 64
+ or t1, t2, t1
+ sra.d a2, a1, a2
+ srai.d a1, a1, 63
+ maskeqz a0, a2, t0
+ maskeqz a1, a1, t0
+ masknez a2, a2, t0
+ masknez t0, t1, t0
+ or a1, a1, a2
+ or a0, t0, a0
+ jr ra
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__lshrti3)
+ slli.d t2, a1, 1
+ nor t3, zero, a2
+ srl.d t1, a0, a2
+ sll.d t2, t2, t3
+ andi t0, a2, 64
+ srl.d a1, a1, a2
+ or t1, t2, t1
+ maskeqz a0, a1, t0
+ masknez a1, a1, t0
+ masknez t0, t1, t0
+ or a0, t0, a0
+ jr ra
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
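The three routines above are the libgcc-style 128-bit shift helpers pulled in when CONFIG_ARCH_SUPPORTS_INT128 is set. As a cross-check, here is a short C reference model built on the compiler's native __int128; it is a verification sketch only, not how the assembly is structured, and the arithmetic right shift of a negative value relies on GCC's defined behaviour.

#include <stdio.h>

typedef unsigned __int128 u128;

/* Reference behaviour of the helpers above for shift counts 0..127. */
static u128 ref_ashl(u128 x, unsigned n) { return x << n; }
static u128 ref_lshr(u128 x, unsigned n) { return x >> n; }
static __int128 ref_ashr(__int128 x, unsigned n) { return x >> n; }

static void print_u128(const char *tag, u128 v)
{
	printf("%s: 0x%016llx%016llx\n", tag,
	       (unsigned long long)(v >> 64), (unsigned long long)v);
}

int main(void)
{
	u128 x = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;

	print_u128("shl 4  ", ref_ashl(x, 4));
	print_u128("shl 68 ", ref_ashl(x, 68));   /* crosses the 64-bit boundary */
	print_u128("lshr 68", ref_lshr(x, 68));
	print_u128("ashr 68", (u128)ref_ashr((__int128)x, 68));
	return 0;
}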
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index e4d1e581dbae..278be2c8fc36 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y += init.o cache.o tlb.o tlbex.o extable.o \
- fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+ fault.o ioremap.o maccess.o mmap.o pgtable.o \
+ page.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 97b40defde06..deefd9617d00 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -31,11 +31,52 @@
int show_unhandled_signals = 1;
+static int __kprobes spurious_fault(unsigned long write, unsigned long address)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!(address & __UA_LIMIT))
+ return 0;
+
+ pgd = pgd_offset_k(address);
+ if (!pgd_present(pgdp_get(pgd)))
+ return 0;
+
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(p4dp_get(p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, address);
+ if (!pud_present(pudp_get(pud)))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(pmdp_get(pmd)))
+ return 0;
+
+ if (pmd_leaf(*pmd)) {
+ return write ? pmd_write(pmdp_get(pmd)) : 1;
+ } else {
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(ptep_get(pte)))
+ return 0;
+
+ return write ? pte_write(ptep_get(pte)) : 1;
+ }
+}
+
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
+ if (spurious_fault(write, address))
+ return;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index 1e76fcb83093..cea84d7f2b91 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -39,32 +39,22 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
+ if (pgd_present(pgdp_get(pgd))) {
p4d = p4d_offset(pgd, addr);
- if (p4d_present(*p4d)) {
+ if (p4d_present(p4dp_get(p4d))) {
pud = pud_offset(p4d, addr);
- if (pud_present(*pud))
+ if (pud_present(pudp_get(pud)))
pmd = pmd_offset(pud, addr);
}
}
- return (pte_t *) pmd;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return (pmd_val(pmd) & _PAGE_HUGE) != 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return (pud_val(pud) & _PAGE_HUGE) != 0;
+ return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)
{
uint64_t val;
/* PMD as PTE. Must be huge page */
- if (!pmd_huge(__pmd(pmd_val)))
+ if (!pmd_leaf(__pmd(pmd_val)))
panic("%s", __func__);
val = pmd_val ^ _PAGE_HUGE;
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 4dd53427f657..06f11d9e4ec1 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
+#include <linux/execmem.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
@@ -64,9 +65,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
@@ -74,14 +72,6 @@ void __init paging_init(void)
free_area_init(max_zone_pfns);
}
-
-void __init mem_init(void)
-{
- max_mapnr = max_low_pfn;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-}
#endif /* !CONFIG_NUMA */
void __ref free_initmem(void)
@@ -140,7 +130,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
- int huge = pmd_val(*pmd) & _PAGE_HUGE;
+ int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;
if (huge)
vmemmap_verify((pte_t *)pmd, node, addr, next);
@@ -172,10 +162,8 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pud_t *pud;
pmd_t *pmd;
- if (p4d_none(*p4d)) {
- pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pud)
- panic("%s: Failed to allocate memory\n", __func__);
+ if (p4d_none(p4dp_get(p4d))) {
+ pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(pud);
@@ -183,10 +171,8 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pmd)
- panic("%s: Failed to allocate memory\n", __func__);
+ if (pud_none(pudp_get(pud))) {
+ pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(pmd);
@@ -194,13 +180,12 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
+ if (!pmd_present(pmdp_get(pmd))) {
pte_t *pte;
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate memory\n", __func__);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
+ kernel_pte_init(pte);
}
return pte_offset_kernel(pmd, addr);
@@ -215,7 +200,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = populate_kernel_pte(addr);
- if (!pte_none(*ptep)) {
+ if (!pte_none(ptep_get(ptep))) {
pte_ERROR(*ptep);
return;
}
@@ -248,3 +233,23 @@ EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index c608adc99845..d2681272d8f0 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -13,6 +13,13 @@
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+#ifdef __PAGETABLE_P4D_FOLDED
+#define __pgd_none(early, pgd) (0)
+#else
+#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
+(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
+#endif
+
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
@@ -55,6 +62,9 @@ void *kasan_mem_to_shadow(const void *addr)
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
+ case XKPRANGE_WC_SEG:
+ offset = XKPRANGE_WC_SHADOW_OFFSET;
+ break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
@@ -79,6 +89,8 @@ const void *kasan_shadow_to_mem(const void *shadow_addr)
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
@@ -105,7 +117,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
- if (__pmd_none(early, READ_ONCE(*pmdp))) {
+ if (__pmd_none(early, pmdp_get(pmdp))) {
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -118,7 +130,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
- if (__pud_none(early, READ_ONCE(*pudp))) {
+ if (__pud_none(early, pudp_get(pudp))) {
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -131,7 +143,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
- if (__p4d_none(early, READ_ONCE(*p4dp))) {
+ if (__p4d_none(early, p4dp_get(p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -142,6 +154,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
return pud_offset(p4dp, addr);
}
+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
+{
+ if (__pgd_none(early, pgdp_get(pgdp))) {
+ phys_addr_t p4d_phys = early ?
+ __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
+ pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
+ }
+
+ return p4d_offset(pgdp, addr);
+}
+
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
@@ -154,7 +179,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
: kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
- } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+ } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@@ -166,7 +191,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, node, early);
- } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+ } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
@@ -178,19 +203,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
do {
next = pud_addr_end(addr, end);
kasan_pmd_populate(pudp, addr, next, node, early);
- } while (pudp++, addr = next, addr != end);
+ } while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
do {
next = p4d_addr_end(addr, end);
kasan_pud_populate(p4dp, addr, next, node, early);
- } while (p4dp++, addr = next, addr != end);
+ } while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +243,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +258,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
* swapper_pg_dir. pgd_clear() can't be used
* here because it's nop on 2,3-level pagetable setups
*/
- for (; start < end; start += PGDIR_SIZE)
+ for (; start < end; start = pgd_addr_end(start, end))
kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}
@@ -243,6 +268,17 @@ void __init kasan_init(void)
phys_addr_t pa_start, pa_end;
/*
+ * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+ * overflow UINTPTR_MAX and then looks like a user space address.
+ * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+ * large for Loongson-2K series whose cpu_vabits = 39.
+ */
+ if (KASAN_SHADOW_END < vm_map_base) {
+ pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+ return;
+ }
+
+ /*
* PGD was populated as invalid_pmd_table or invalid_pud_table
* in pagetable_init() which depends on how many levels of page
* table you are using, but we had to clean the gpd of kasan
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 89af7c12e8c0..1df9e99582cc 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
@@ -25,7 +26,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp && is_file_hugepages(filp))
+ info.align_mask = huge_page_mask_align(filp);
+ else
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -83,14 +87,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
*/
}
- info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -102,7 +106,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
new file mode 100644
index 000000000000..99165903908a
--- /dev/null
+++ b/arch/loongarch/mm/pageattr.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/memblock.h>
+#include <linux/pagewalk.h>
+#include <linux/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+struct pageattr_masks {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+{
+ unsigned long new_val = val;
+ struct pageattr_masks *masks = walk->private;
+
+ new_val &= ~(pgprot_val(masks->clear_mask));
+ new_val |= (pgprot_val(masks->set_mask));
+
+ return new_val;
+}
+
+static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgd_t val = pgdp_get(pgd);
+
+ if (pgd_leaf(val)) {
+ val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+ set_pgd(pgd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ p4d_t val = p4dp_get(p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+ set_p4d(p4d, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pud_t val = pudp_get(pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+ set_pud(pud, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pmd_t val = pmdp_get(pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+ set_pmd(pmd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t val = ptep_get(pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+
+ return 0;
+}
+
+static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk)
+{
+ return 0;
+}
+
+static const struct mm_walk_ops pageattr_ops = {
+ .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+ .pte_entry = pageattr_pte_entry,
+ .pte_hole = pageattr_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
+{
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+ };
+
+ if (!numpages)
+ return 0;
+
+ mmap_write_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+ return ret;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
+}
+
+bool kernel_page_present(struct page *page)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return memblock_is_memory(__pa(addr));
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(pgdp_get(pgd)))
+ return false;
+ if (pgd_leaf(pgdp_get(pgd)))
+ return true;
+
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(p4dp_get(p4d)))
+ return false;
+ if (p4d_leaf(p4dp_get(p4d)))
+ return true;
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(pudp_get(pud)))
+ return false;
+ if (pud_leaf(pudp_get(pud)))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(pmdp_get(pmd)))
+ return false;
+ if (pmd_leaf(pmdp_get(pmd)))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(ptep_get(pte));
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+}
+
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t set, clear;
+
+ if (addr < vm_map_base)
+ return 0;
+
+ if (valid) {
+ set = PAGE_KERNEL;
+ clear = __pgprot(0);
+ } else {
+ set = __pgprot(0);
+ clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
+ }
+
+ return __set_memory(addr, nr, set, clear);
+}
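
Note on the set_memory_*() helpers added above: they take a kernel virtual address plus a page count, and they return early for addresses below vm_map_base, which are not reached through the kernel page tables. A minimal usage sketch follows, assuming a hypothetical caller that write-protects a vmalloc'd page; alloc_ro_page()/free_ro_page() are illustrative names only, not part of this patch.

#include <linux/set_memory.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical caller: fill a page, then keep it read-only until freed. */
static void *alloc_ro_page(void)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return NULL;

	memset(buf, 0xa5, PAGE_SIZE);		/* write while still RW */
	set_memory_ro((unsigned long)buf, 1);	/* one page, read-only from here on */
	return buf;
}

static void free_ro_page(void *buf)
{
	set_memory_rw((unsigned long)buf, 1);	/* restore RW before vfree() */
	vfree(buf);
}
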
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index bda018150000..352d9b2e02ab 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -23,11 +23,10 @@ EXPORT_SYMBOL(tlb_virt_to_page);
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *init, *ret = NULL;
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ pgd_t *init, *ret;
- if (ptdesc) {
- ret = (pgd_t *)ptdesc_address(ptdesc);
+ ret = __pgd_alloc(mm, 0);
+ if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
@@ -116,19 +115,30 @@ void pud_init(void *addr)
EXPORT_SYMBOL_GPL(pud_init);
#endif
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
+void kernel_pte_init(void *addr)
{
- pmd_t pmd;
+ unsigned long *p, *end;
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PTE;
- return pmd;
+ do {
+ p[0] = _PAGE_GLOBAL;
+ p[1] = _PAGE_GLOBAL;
+ p[2] = _PAGE_GLOBAL;
+ p[3] = _PAGE_GLOBAL;
+ p[4] = _PAGE_GLOBAL;
+ p += 8;
+ p[-3] = _PAGE_GLOBAL;
+ p[-2] = _PAGE_GLOBAL;
+ p[-1] = _PAGE_GLOBAL;
+ } while (p != end);
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
flush_tlb_all();
}
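
The kernel_pte_init() added above is a hand-unrolled fill loop: it stamps every slot of a freshly allocated kernel PTE page with _PAGE_GLOBAL (global bit set, entry still non-present), eight entries per iteration. An equivalent non-unrolled form, shown only as a readability aid and not part of the patch:

/* Illustrative equivalent of kernel_pte_init(). */
static void kernel_pte_init_ref(void *addr)
{
	unsigned long *p = addr;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		p[i] = _PAGE_GLOBAL;	/* global, non-present initial entries */
}
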
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 5ac9beb5f093..3b427b319db2 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -289,7 +289,7 @@ static void setup_tlb_handler(int cpu)
/* Avoid lockdep warning */
rcutree_report_cpu_starting(cpu);
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
vec_sz = sizeof(exception_handlers);
if (pcpu_handlers[cpu])
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index a44387b838af..c08682a89c58 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -125,6 +125,8 @@ vmalloc_load:
tlb_huge_update_load:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
@@ -135,7 +137,6 @@ tlb_huge_update_load:
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, _PAGE_VALID
st.d t0, t1, 0
#endif
@@ -281,6 +282,8 @@ vmalloc_store:
tlb_huge_update_store:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
@@ -292,7 +295,6 @@ tlb_huge_update_store:
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#endif
@@ -438,6 +440,8 @@ vmalloc_modify:
tlb_huge_update_modify:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_WRITE
beqz t0, nopage_tlb_modify
@@ -448,7 +452,6 @@ tlb_huge_update_modify:
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#endif
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e73323d759d0..fa1500d4aa3e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
+ else
+ emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@@ -179,15 +181,15 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
if (!is_tail_call) {
/* Set return value */
- move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
+ emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
/* Return to the caller */
- emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
} else {
/*
* Call the next bpf prog and skip the first instruction
* of TCC initialization.
*/
- emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
}
}
@@ -904,8 +906,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
return ret;
move_addr(ctx, t1, func_addr);
- emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
@@ -1294,16 +1302,19 @@ skip_init_ctx:
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
+ int err;
+
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
- prog->bpf_func = NULL;
- prog->jited = 0;
- prog->jited_len = 0;
- goto out_offset;
+ goto out_free;
+ }
+ err = bpf_jit_binary_lock_ro(header);
+ if (err) {
+ pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
+ err);
+ goto out_free;
}
- bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
@@ -1334,6 +1345,13 @@ out:
out_offset = -1;
return prog;
+
+out_free:
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..f9c569f53949 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index 365f7de771cb..1da4dc46df43 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
+ kfree(root_ops);
} else {
struct pci_bus *child;
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index 1e0590542f98..e7b7346592cb 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
+#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void)
{
+ save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void)
{
+ sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
index 3ea8e07aa225..5bbdb9fd76e5 100644
--- a/arch/loongarch/power/platform.c
+++ b/arch/loongarch/power/platform.c
@@ -17,7 +17,7 @@ void enable_gpe_wakeup(void)
if (acpi_gbl_reduced_hardware)
return;
- acpi_enable_all_wakeup_gpes();
+ acpi_hw_enable_all_wakeup_gpes();
}
void enable_pci_wakeup(void)
@@ -34,22 +34,49 @@ void enable_pci_wakeup(void)
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0);
}
+static struct platform_device loongson3_cpufreq_device = {
+ .name = "loongson3_cpufreq",
+ .id = -1,
+};
+
+static int __init loongson_cpufreq_init(void)
+{
+ if (!cpu_has_scalefreq)
+ return -ENODEV;
+
+ return platform_device_register(&loongson3_cpufreq_device);
+}
+
+arch_initcall(loongson_cpufreq_init);
+
+static void default_suspend_addr(void)
+{
+ acpi_enter_sleep_state(ACPI_STATE_S3);
+}
+
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
uint64_t suspend_addr = 0;
- if (acpi_disabled || acpi_gbl_reduced_hardware)
+ if (acpi_disabled)
+ return 0;
+
+ if (!acpi_gbl_reduced_hardware)
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ if (!acpi_sleep_state_supported(ACPI_STATE_S3))
return 0;
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
- pr_err("ACPI S3 is not support!\n");
- return -1;
+ pr_info("ACPI S3 supported with hardware register default\n");
+ loongson_sysconf.suspend_addr = (u64)default_suspend_addr;
+ } else {
+ pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n");
+ loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
}
- loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
#endif
return 0;
}
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
index 166d9e06a64b..c9e594925c47 100644
--- a/arch/loongarch/power/suspend.c
+++ b/arch/loongarch/power/suspend.c
@@ -24,6 +24,7 @@ struct saved_registers {
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
+ u64 pcpu_base;
};
static struct saved_registers saved_regs;
@@ -36,6 +37,7 @@ void loongarch_common_suspend(void)
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
+ saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@@ -44,7 +46,6 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
- csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
@@ -55,6 +56,7 @@ void loongarch_common_resume(void)
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
+ csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
}
int loongarch_acpi_suspend(void)
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index e2fc3b4e31f0..df0865df26fa 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -30,9 +30,6 @@
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
-
- la.pcrel t0, acpi_saved_sp
- st.d sp, t0, 0
.endm
.macro SETUP_WAKEUP
@@ -51,6 +48,7 @@
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
+ addi.d sp, sp, PT_SIZE
.endm
.text
@@ -59,6 +57,10 @@
/* Sleep/wakeup code for Loongson-3 */
SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
+
+ la.pcrel t0, acpi_saved_sp
+ st.d sp, t0, 0
+
bl __flush_cache_all
/* Pass RA and SP to BIOS */
@@ -66,18 +68,14 @@ SYM_FUNC_START(loongarch_suspend_enter)
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
- jirl a0, t0, 0 /* Call BIOS's STR sleep routine */
+ jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
@@ -86,7 +84,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
+
SETUP_WAKEUP
- addi.d sp, sp, PT_SIZE
jr ra
SYM_FUNC_END(loongarch_suspend_enter)
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 75c6726382c3..ccd2c5e135c6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -1,15 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# Objects to go into the VDSO.
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-OBJECT_FILES_NON_STANDARD := y
-
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
-obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o
+obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
+ vgetrandom-chacha.o sigreturn.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@@ -23,7 +19,7 @@ ccflags-vdso := \
cflags-vdso := $(ccflags-vdso) \
-isystem $(shell $(CC) -print-file-name=include) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
- -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
+ -std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-asynchronous-unwind-tables) \
$(call cc-option, -fno-stack-protector)
@@ -34,12 +30,13 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
-
-GCOV_PROFILE := n
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
#
# Shared build commands.
@@ -52,7 +49,7 @@ quiet_cmd_vdsoas_o_S = AS $@
cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 56ad855896de..8ff986499947 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -3,6 +3,9 @@
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <asm/page.h>
+#include <generated/asm-offsets.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
@@ -10,7 +13,8 @@ OUTPUT_ARCH(loongarch)
SECTIONS
{
- PROVIDE(_start = .);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
@@ -62,6 +66,7 @@ VERSION
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
+ __vdso_getrandom;
__vdso_rt_sigreturn;
local: *;
};
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 9e445be39763..5301cd9d0f83 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -19,27 +19,19 @@ static __always_inline int read_cpu_id(void)
return cpu_id;
}
-static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
-{
- return (struct vdso_pcpu_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START * PAGE_SIZE);
-}
-
extern
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
int cpu_id;
- const struct vdso_pcpu_data *data;
cpu_id = read_cpu_id();
if (cpu)
*cpu = cpu_id;
- if (node) {
- data = get_pcpu_data();
- *node = data[cpu_id].node;
- }
+ if (node)
+ *node = vdso_u_arch_data.pdata[cpu_id].node;
return 0;
}
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..c4dd2bab8825
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <linux/linkage.h>
+
+.text
+
+.macro OP_4REG op d0 d1 d2 d3 s0 s1 s2 s3
+ \op \d0, \d0, \s0
+ \op \d1, \d1, \s1
+ \op \d2, \d2, \s2
+ \op \d3, \d3, \s3
+.endm
+
+/*
+ * Very basic LoongArch implementation of ChaCha20. Produces a given positive
+ * number of blocks of output with a nonce of 0, taking an input key and
+ * 8-byte counter. Importantly does not spill to the stack. Its arguments
+ * are:
+ *
+ * a0: output bytes
+ * a1: 32-byte key input
+ * a2: 8-byte counter input/output
+ * a3: number of 64-byte blocks to write to output
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+
+/* We don't need a frame pointer */
+#define s9 fp
+
+#define output a0
+#define key a1
+#define counter a2
+#define nblocks a3
+#define i a4
+#define state0 s0
+#define state1 s1
+#define state2 s2
+#define state3 s3
+#define state4 s4
+#define state5 s5
+#define state6 s6
+#define state7 s7
+#define state8 s8
+#define state9 s9
+#define state10 a5
+#define state11 a6
+#define state12 a7
+#define state13 t0
+#define state14 t1
+#define state15 t2
+#define cnt_lo t3
+#define cnt_hi t4
+#define copy0 t5
+#define copy1 t6
+#define copy2 t7
+#define copy3 t8
+
+/* Packs to be used with OP_4REG */
+#define line0 state0, state1, state2, state3
+#define line1 state4, state5, state6, state7
+#define line2 state8, state9, state10, state11
+#define line3 state12, state13, state14, state15
+
+#define line1_perm state5, state6, state7, state4
+#define line2_perm state10, state11, state8, state9
+#define line3_perm state15, state12, state13, state14
+
+#define copy copy0, copy1, copy2, copy3
+
+#define _16 16, 16, 16, 16
+#define _20 20, 20, 20, 20
+#define _24 24, 24, 24, 24
+#define _25 25, 25, 25, 25
+
+ /*
+ * The ABI requires s0-s9 to be saved and sp to be 16-byte aligned.
+ * This does not violate the stack-less requirement: no sensitive data
+ * is spilled onto the stack.
+ */
+ PTR_ADDI sp, sp, (-SZREG * 10) & STACK_ALIGN
+ REG_S s0, sp, 0
+ REG_S s1, sp, SZREG
+ REG_S s2, sp, SZREG * 2
+ REG_S s3, sp, SZREG * 3
+ REG_S s4, sp, SZREG * 4
+ REG_S s5, sp, SZREG * 5
+ REG_S s6, sp, SZREG * 6
+ REG_S s7, sp, SZREG * 7
+ REG_S s8, sp, SZREG * 8
+ REG_S s9, sp, SZREG * 9
+
+ li.w copy0, 0x61707865
+ li.w copy1, 0x3320646e
+ li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
+
+ ld.w cnt_lo, counter, 0
+ ld.w cnt_hi, counter, 4
+
+.Lblock:
+ /* state[0,1,2,3] = "expand 32-byte k" */
+ move state0, copy0
+ move state1, copy1
+ move state2, copy2
+ move state3, copy3
+
+ /* state[4,5,..,11] = key */
+ ld.w state4, key, 0
+ ld.w state5, key, 4
+ ld.w state6, key, 8
+ ld.w state7, key, 12
+ ld.w state8, key, 16
+ ld.w state9, key, 20
+ ld.w state10, key, 24
+ ld.w state11, key, 28
+
+ /* state[12,13] = counter */
+ move state12, cnt_lo
+ move state13, cnt_hi
+
+ /* state[14,15] = 0 */
+ move state14, zero
+ move state15, zero
+
+ li.w i, 10
+.Lpermute:
+ /* odd round */
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _16
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _20
+
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _24
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _25
+
+ /* even round */
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _16
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _20
+
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _24
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _25
+
+ addi.w i, i, -1
+ bnez i, .Lpermute
+
+ /* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
+ OP_4REG add.w line0, copy
+ st.w state0, output, 0
+ st.w state1, output, 4
+ st.w state2, output, 8
+ st.w state3, output, 12
+
+ /* from now on state[0,1,2,3] are scratch registers */
+
+ /* state[0,1,2,3] = lo32(key) */
+ ld.w state0, key, 0
+ ld.w state1, key, 4
+ ld.w state2, key, 8
+ ld.w state3, key, 12
+
+ /* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */
+ OP_4REG add.w line1, line0
+ st.w state4, output, 16
+ st.w state5, output, 20
+ st.w state6, output, 24
+ st.w state7, output, 28
+
+ /* state[0,1,2,3] = hi32(key) */
+ ld.w state0, key, 16
+ ld.w state1, key, 20
+ ld.w state2, key, 24
+ ld.w state3, key, 28
+
+ /* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */
+ OP_4REG add.w line2, line0
+ st.w state8, output, 32
+ st.w state9, output, 36
+ st.w state10, output, 40
+ st.w state11, output, 44
+
+ /* output[12,13,14,15] = state[12,13,14,15] + [cnt_lo, cnt_hi, 0, 0] */
+ add.w state12, state12, cnt_lo
+ add.w state13, state13, cnt_hi
+ st.w state12, output, 48
+ st.w state13, output, 52
+ st.w state14, output, 56
+ st.w state15, output, 60
+
+ /* ++counter */
+ addi.w cnt_lo, cnt_lo, 1
+ sltui state0, cnt_lo, 1
+ add.w cnt_hi, cnt_hi, state0
+
+ /* output += 64 */
+ PTR_ADDI output, output, 64
+ /* --nblocks */
+ PTR_ADDI nblocks, nblocks, -1
+ bnez nblocks, .Lblock
+
+ /* counter = [cnt_lo, cnt_hi] */
+ st.w cnt_lo, counter, 0
+ st.w cnt_hi, counter, 4
+
+ /*
+ * Zero out the potentially sensitive regs, in case nothing uses these
+ * again. As of now copy[0,1,2,3] just contains "expand 32-byte k" and
+ * state[0,...,9] live in s0-s9, which we restore in the epilogue, so we
+ * only need to zero state[10,...,15].
+ */
+ move state10, zero
+ move state11, zero
+ move state12, zero
+ move state13, zero
+ move state14, zero
+ move state15, zero
+
+ REG_L s0, sp, 0
+ REG_L s1, sp, SZREG
+ REG_L s2, sp, SZREG * 2
+ REG_L s3, sp, SZREG * 3
+ REG_L s4, sp, SZREG * 4
+ REG_L s5, sp, SZREG * 5
+ REG_L s6, sp, SZREG * 6
+ REG_L s7, sp, SZREG * 7
+ REG_L s8, sp, SZREG * 8
+ REG_L s9, sp, SZREG * 9
+ PTR_ADDI sp, sp, -((-SZREG * 10) & STACK_ALIGN)
+
+ jr ra
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
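
For reference, the interface documented at the top of this file can be modelled in portable C as below. This is only a sketch for checking the assembly against known ChaCha20 test vectors (key taken as eight little-endian words, 64-bit block counter, zero nonce); none of these names exist in the kernel tree.

#include <stdint.h>
#include <string.h>

static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}

/* out receives nblocks * 64 bytes; counter[2] is advanced, nonce is fixed to 0. */
static void chacha20_blocks_ref(uint8_t *out, const uint32_t key[8],
				uint32_t counter[2], unsigned long nblocks)
{
	while (nblocks--) {
		uint32_t s[16] = {
			0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
			key[0], key[1], key[2], key[3],
			key[4], key[5], key[6], key[7],
			counter[0], counter[1], 0, 0,
		};
		uint32_t x[16];
		int i;

		memcpy(x, s, sizeof(x));
		for (i = 0; i < 10; i++) {	/* 20 rounds = 10 double rounds */
			/* odd (column) round */
			quarter_round(&x[0], &x[4], &x[8],  &x[12]);
			quarter_round(&x[1], &x[5], &x[9],  &x[13]);
			quarter_round(&x[2], &x[6], &x[10], &x[14]);
			quarter_round(&x[3], &x[7], &x[11], &x[15]);
			/* even (diagonal) round */
			quarter_round(&x[0], &x[5], &x[10], &x[15]);
			quarter_round(&x[1], &x[6], &x[11], &x[12]);
			quarter_round(&x[2], &x[7], &x[8],  &x[13]);
			quarter_round(&x[3], &x[4], &x[9],  &x[14]);
		}
		for (i = 0; i < 16; i++) {
			uint32_t v = x[i] + s[i];

			/* explicit little-endian stores, matching the st.w output */
			out[4 * i + 0] = v;
			out[4 * i + 1] = v >> 8;
			out[4 * i + 2] = v >> 16;
			out[4 * i + 3] = v >> 24;
		}
		out += 64;
		if (++counter[0] == 0)	/* 64-bit counter carry, as in the asm */
			counter[1]++;
	}
}
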
diff --git a/arch/loongarch/vdso/vgetrandom.c b/arch/loongarch/vdso/vgetrandom.c
new file mode 100644
index 000000000000..d5f258ac4a36
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#include <linux/types.h>
+
+ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+}
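
The wrapper above just forwards to the generic __cvdso_getrandom() implementation; userspace normally reaches it through the C library rather than by calling the vDSO symbol directly. A minimal consumer sketch using only the standard getrandom(3) API follows; nothing in it is LoongArch-specific, and whether the fast path avoids the syscall depends on the libc in use.

#include <stdio.h>
#include <sys/random.h>
#include <sys/types.h>

int main(void)
{
	unsigned char buf[32];
	size_t i;

	/* A vDSO-aware libc can satisfy this without entering the kernel. */
	if (getrandom(buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		return 1;

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	return 0;
}
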