Diffstat (limited to 'arch/loongarch')
-rw-r--r--  arch/loongarch/Kbuild | 1
-rw-r--r--  arch/loongarch/Kconfig | 97
-rw-r--r--  arch/loongarch/Makefile | 37
-rw-r--r--  arch/loongarch/boot/dts/Makefile | 2
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k0500-ref.dts | 9
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k0500.dtsi | 190
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000-ref.dts | 40
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000.dtsi | 85
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000-ref.dts | 10
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000.dtsi | 102
-rwxr-xr-x  arch/loongarch/boot/install.sh | 56
-rw-r--r--  arch/loongarch/configs/loongson3_defconfig | 207
-rw-r--r--  arch/loongarch/crypto/Kconfig | 9
-rw-r--r--  arch/loongarch/crypto/Makefile | 2
-rw-r--r--  arch/loongarch/crypto/crc32-loongarch.c | 300
-rw-r--r--  arch/loongarch/include/asm/Kbuild | 21
-rw-r--r--  arch/loongarch/include/asm/acenv.h | 7
-rw-r--r--  arch/loongarch/include/asm/acpi.h | 2
-rw-r--r--  arch/loongarch/include/asm/addrspace.h | 12
-rw-r--r--  arch/loongarch/include/asm/alternative-asm.h | 4
-rw-r--r--  arch/loongarch/include/asm/alternative.h | 4
-rw-r--r--  arch/loongarch/include/asm/asm-extable.h | 6
-rw-r--r--  arch/loongarch/include/asm/asm-prototypes.h | 8
-rw-r--r--  arch/loongarch/include/asm/asm.h | 8
-rw-r--r--  arch/loongarch/include/asm/asmmacro.h | 1
-rw-r--r--  arch/loongarch/include/asm/atomic.h | 2
-rw-r--r--  arch/loongarch/include/asm/bootinfo.h | 4
-rw-r--r--  arch/loongarch/include/asm/bug.h | 30
-rw-r--r--  arch/loongarch/include/asm/cache.h | 2
-rw-r--r--  arch/loongarch/include/asm/cpu-features.h | 5
-rw-r--r--  arch/loongarch/include/asm/cpu-info.h | 22
-rw-r--r--  arch/loongarch/include/asm/cpu.h | 59
-rw-r--r--  arch/loongarch/include/asm/dma-direct.h | 11
-rw-r--r--  arch/loongarch/include/asm/entry-common.h | 8
-rw-r--r--  arch/loongarch/include/asm/fprobe.h | 12
-rw-r--r--  arch/loongarch/include/asm/fpu.h | 39
-rw-r--r--  arch/loongarch/include/asm/ftrace.h | 64
-rw-r--r--  arch/loongarch/include/asm/gpr-num.h | 6
-rw-r--r--  arch/loongarch/include/asm/hardirq.h | 4
-rw-r--r--  arch/loongarch/include/asm/hugetlb.h | 31
-rw-r--r--  arch/loongarch/include/asm/hw_breakpoint.h | 8
-rw-r--r--  arch/loongarch/include/asm/hw_irq.h | 2
-rw-r--r--  arch/loongarch/include/asm/image.h | 52
-rw-r--r--  arch/loongarch/include/asm/inst.h | 23
-rw-r--r--  arch/loongarch/include/asm/io.h | 33
-rw-r--r--  arch/loongarch/include/asm/irq.h | 34
-rw-r--r--  arch/loongarch/include/asm/irq_work.h | 10
-rw-r--r--  arch/loongarch/include/asm/irqflags.h | 20
-rw-r--r--  arch/loongarch/include/asm/jump_label.h | 20
-rw-r--r--  arch/loongarch/include/asm/kasan.h | 24
-rw-r--r--  arch/loongarch/include/asm/kexec.h | 12
-rw-r--r--  arch/loongarch/include/asm/kfence.h | 6
-rw-r--r--  arch/loongarch/include/asm/kvm_csr.h | 6
-rw-r--r--  arch/loongarch/include/asm/kvm_eiointc.h | 84
-rw-r--r--  arch/loongarch/include/asm/kvm_host.h | 89
-rw-r--r--  arch/loongarch/include/asm/kvm_ipi.h | 45
-rw-r--r--  arch/loongarch/include/asm/kvm_mmu.h | 20
-rw-r--r--  arch/loongarch/include/asm/kvm_para.h | 30
-rw-r--r--  arch/loongarch/include/asm/kvm_pch_pic.h | 75
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h | 21
-rw-r--r--  arch/loongarch/include/asm/lbt.h | 10
-rw-r--r--  arch/loongarch/include/asm/loongarch.h | 151
-rw-r--r--  arch/loongarch/include/asm/mmu_context.h | 35
-rw-r--r--  arch/loongarch/include/asm/mmzone.h | 16
-rw-r--r--  arch/loongarch/include/asm/numa.h | 14
-rw-r--r--  arch/loongarch/include/asm/orc_types.h | 4
-rw-r--r--  arch/loongarch/include/asm/page.h | 19
-rw-r--r--  arch/loongarch/include/asm/paravirt.h | 12
-rw-r--r--  arch/loongarch/include/asm/percpu.h | 124
-rw-r--r--  arch/loongarch/include/asm/pgalloc.h | 22
-rw-r--r--  arch/loongarch/include/asm/pgtable-bits.h | 11
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 114
-rw-r--r--  arch/loongarch/include/asm/prefetch.h | 2
-rw-r--r--  arch/loongarch/include/asm/ptrace.h | 6
-rw-r--r--  arch/loongarch/include/asm/qspinlock.h | 41
-rw-r--r--  arch/loongarch/include/asm/set_memory.h | 22
-rw-r--r--  arch/loongarch/include/asm/setup.h | 5
-rw-r--r--  arch/loongarch/include/asm/smp.h | 9
-rw-r--r--  arch/loongarch/include/asm/sparsemem.h | 5
-rw-r--r--  arch/loongarch/include/asm/stackframe.h | 17
-rw-r--r--  arch/loongarch/include/asm/stacktrace.h | 8
-rw-r--r--  arch/loongarch/include/asm/syscall.h | 15
-rw-r--r--  arch/loongarch/include/asm/thread_info.h | 78
-rw-r--r--  arch/loongarch/include/asm/topology.h | 16
-rw-r--r--  arch/loongarch/include/asm/types.h | 2
-rw-r--r--  arch/loongarch/include/asm/unistd.h | 3
-rw-r--r--  arch/loongarch/include/asm/unwind_hints.h | 12
-rw-r--r--  arch/loongarch/include/asm/uprobes.h | 5
-rw-r--r--  arch/loongarch/include/asm/vdso.h | 1
-rw-r--r--  arch/loongarch/include/asm/vdso/arch_data.h | 25
-rw-r--r--  arch/loongarch/include/asm/vdso/getrandom.h | 33
-rw-r--r--  arch/loongarch/include/asm/vdso/gettimeofday.h | 24
-rw-r--r--  arch/loongarch/include/asm/vdso/processor.h | 4
-rw-r--r--  arch/loongarch/include/asm/vdso/vdso.h | 60
-rw-r--r--  arch/loongarch/include/asm/vdso/vsyscall.h | 17
-rw-r--r--  arch/loongarch/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/loongarch/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/loongarch/include/uapi/asm/kvm.h | 46
-rw-r--r--  arch/loongarch/include/uapi/asm/kvm_para.h | 22
-rw-r--r--  arch/loongarch/include/uapi/asm/ptrace.h | 40
-rw-r--r--  arch/loongarch/include/uapi/asm/setup.h | 8
-rw-r--r--  arch/loongarch/include/uapi/asm/sigcontext.h | 1
-rw-r--r--  arch/loongarch/include/uapi/asm/unistd.h | 4
-rw-r--r--  arch/loongarch/kernel/Makefile | 15
-rw-r--r--  arch/loongarch/kernel/Makefile.syscalls | 4
-rw-r--r--  arch/loongarch/kernel/acpi.c | 142
-rw-r--r--  arch/loongarch/kernel/alternative.c | 1
-rw-r--r--  arch/loongarch/kernel/asm-offsets.c | 24
-rw-r--r--  arch/loongarch/kernel/cacheinfo.c | 6
-rw-r--r--  arch/loongarch/kernel/cpu-probe.c | 170
-rw-r--r--  arch/loongarch/kernel/efi-header.S | 4
-rw-r--r--  arch/loongarch/kernel/efi.c | 20
-rw-r--r--  arch/loongarch/kernel/elf.c | 1
-rw-r--r--  arch/loongarch/kernel/entry.S | 25
-rw-r--r--  arch/loongarch/kernel/env.c | 19
-rw-r--r--  arch/loongarch/kernel/fpu.S | 10
-rw-r--r--  arch/loongarch/kernel/ftrace_dyn.c | 21
-rw-r--r--  arch/loongarch/kernel/genex.S | 33
-rw-r--r--  arch/loongarch/kernel/head.S | 13
-rw-r--r--  arch/loongarch/kernel/hw_breakpoint.c | 18
-rw-r--r--  arch/loongarch/kernel/idle.c | 3
-rw-r--r--  arch/loongarch/kernel/inst.c | 88
-rw-r--r--  arch/loongarch/kernel/io.c | 94
-rw-r--r--  arch/loongarch/kernel/irq.c | 15
-rw-r--r--  arch/loongarch/kernel/kdebugfs.c | 168
-rw-r--r--  arch/loongarch/kernel/kexec_efi.c | 113
-rw-r--r--  arch/loongarch/kernel/kexec_elf.c | 105
-rw-r--r--  arch/loongarch/kernel/kfpu.c | 23
-rw-r--r--  arch/loongarch/kernel/kgdb.c | 5
-rw-r--r--  arch/loongarch/kernel/kprobes.c | 4
-rw-r--r--  arch/loongarch/kernel/lbt.S | 4
-rw-r--r--  arch/loongarch/kernel/machine_kexec.c | 65
-rw-r--r--  arch/loongarch/kernel/machine_kexec_file.c | 239
-rw-r--r--  arch/loongarch/kernel/mcount.S | 17
-rw-r--r--  arch/loongarch/kernel/mcount_dyn.S | 14
-rw-r--r--  arch/loongarch/kernel/mem.c | 7
-rw-r--r--  arch/loongarch/kernel/module-sections.c | 36
-rw-r--r--  arch/loongarch/kernel/numa.c | 196
-rw-r--r--  arch/loongarch/kernel/paravirt.c | 199
-rw-r--r--  arch/loongarch/kernel/perf_event.c | 10
-rw-r--r--  arch/loongarch/kernel/proc.c | 41
-rw-r--r--  arch/loongarch/kernel/process.c | 49
-rw-r--r--  arch/loongarch/kernel/ptrace.c | 25
-rw-r--r--  arch/loongarch/kernel/relocate.c | 56
-rw-r--r--  arch/loongarch/kernel/relocate_kernel.S | 2
-rw-r--r--  arch/loongarch/kernel/reset.c | 6
-rw-r--r--  arch/loongarch/kernel/setup.c | 45
-rw-r--r--  arch/loongarch/kernel/signal.c | 31
-rw-r--r--  arch/loongarch/kernel/smp.c | 135
-rw-r--r--  arch/loongarch/kernel/stacktrace.c | 3
-rw-r--r--  arch/loongarch/kernel/switch.S | 2
-rw-r--r--  arch/loongarch/kernel/syscall.c | 21
-rw-r--r--  arch/loongarch/kernel/time.c | 34
-rw-r--r--  arch/loongarch/kernel/traps.c | 39
-rw-r--r--  arch/loongarch/kernel/unaligned.c | 8
-rw-r--r--  arch/loongarch/kernel/unwind_guess.c | 1
-rw-r--r--  arch/loongarch/kernel/unwind_orc.c | 9
-rw-r--r--  arch/loongarch/kernel/unwind_prologue.c | 1
-rw-r--r--  arch/loongarch/kernel/uprobes.c | 11
-rw-r--r--  arch/loongarch/kernel/vdso.c | 107
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/loongarch/kvm/Kconfig | 10
-rw-r--r--  arch/loongarch/kvm/Makefile | 8
-rw-r--r--  arch/loongarch/kvm/exit.c | 266
-rw-r--r--  arch/loongarch/kvm/intc/eiointc.c | 695
-rw-r--r--  arch/loongarch/kvm/intc/ipi.c | 475
-rw-r--r--  arch/loongarch/kvm/intc/pch_pic.c | 491
-rw-r--r--  arch/loongarch/kvm/interrupt.c | 36
-rw-r--r--  arch/loongarch/kvm/irqfd.c | 89
-rw-r--r--  arch/loongarch/kvm/main.c | 60
-rw-r--r--  arch/loongarch/kvm/mmu.c | 111
-rw-r--r--  arch/loongarch/kvm/switch.S | 18
-rw-r--r--  arch/loongarch/kvm/timer.c | 16
-rw-r--r--  arch/loongarch/kvm/tlb.c | 5
-rw-r--r--  arch/loongarch/kvm/trace.h | 49
-rw-r--r--  arch/loongarch/kvm/vcpu.c | 591
-rw-r--r--  arch/loongarch/kvm/vm.c | 114
-rw-r--r--  arch/loongarch/lib/csum.c | 3
-rw-r--r--  arch/loongarch/mm/Makefile | 3
-rw-r--r--  arch/loongarch/mm/fault.c | 99
-rw-r--r--  arch/loongarch/mm/hugetlbpage.c | 9
-rw-r--r--  arch/loongarch/mm/init.c | 44
-rw-r--r--  arch/loongarch/mm/ioremap.c | 6
-rw-r--r--  arch/loongarch/mm/kasan_init.c | 64
-rw-r--r--  arch/loongarch/mm/mmap.c | 11
-rw-r--r--  arch/loongarch/mm/pageattr.c | 238
-rw-r--r--  arch/loongarch/mm/pgtable.c | 28
-rw-r--r--  arch/loongarch/mm/tlb.c | 2
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 765
-rw-r--r--  arch/loongarch/net/bpf_jit.h | 11
-rw-r--r--  arch/loongarch/pci/acpi.c | 15
-rw-r--r--  arch/loongarch/pci/pci.c | 9
-rw-r--r--  arch/loongarch/power/hibernate.c | 3
-rw-r--r--  arch/loongarch/power/platform.c | 39
-rw-r--r--  arch/loongarch/power/suspend_asm.S | 18
-rw-r--r--  arch/loongarch/vdso/Makefile | 14
-rw-r--r--  arch/loongarch/vdso/vdso.lds.S | 7
-rw-r--r--  arch/loongarch/vdso/vgetcpu.c | 12
-rw-r--r--  arch/loongarch/vdso/vgetrandom-chacha.S | 253
-rw-r--r--  arch/loongarch/vdso/vgetrandom.c | 10
200 files changed, 8186 insertions, 2275 deletions
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
index bfa21465d83a..beb8499dd8ed 100644
--- a/arch/loongarch/Kbuild
+++ b/arch/loongarch/Kbuild
@@ -4,7 +4,6 @@ obj-y += net/
obj-y += vdso/
obj-$(CONFIG_KVM) += kvm/
-obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
# for cleaning
subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ddc042895d01..5b1116733d88 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -9,6 +9,7 @@ config LOONGARCH
select ACPI_PPTT if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
+ select ARCH_NEEDS_DEFER_KASAN
select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
@@ -16,14 +17,20 @@ config LOONGARCH
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
+ select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
@@ -61,9 +68,15 @@ config LOONGARCH
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT
+ select ARCH_SUPPORTS_SCHED_SMT if SMP
+ select ARCH_SUPPORTS_SCHED_MC if SMP
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -75,14 +88,17 @@ config LOONGARCH
select BUILDTIME_TABLE_SORT
select COMMON_CLK
select CPU_PM
+ select EDAC_SUPPORT
select EFI
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -96,7 +112,6 @@ config LOONGARCH
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
- select GENERIC_VDSO_TIME_NS
select GPIOLIB
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL
@@ -105,7 +120,9 @@ config LOONGARCH
select HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -119,16 +136,18 @@ config LOONGARCH
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ select HAVE_FTRACE_REGS_HAVING_PT_REGS
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
select HAVE_EXIT_THREAD
+ select HAVE_GENERIC_TIF_BITS
select HAVE_GUP_FAST
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
- select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
@@ -143,11 +162,12 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC
@@ -161,7 +181,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -169,6 +189,7 @@ config LOONGARCH
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ select NUMA_MEMBLKS if NUMA
select OF
select OF_EARLY_FLATTREE
select PCI
@@ -187,6 +208,7 @@ config LOONGARCH
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
select USER_STACKTRACE_SUPPORT
+ select VDSO_GETRANDOM
select ZONE_DMA32
config 32BIT
@@ -234,7 +256,7 @@ config MACH_LOONGSON64
def_bool 64BIT
config FIX_EARLYCON_MEM
- def_bool y
+ def_bool !ARCH_IOREMAP
config PGTABLE_2LEVEL
bool
@@ -262,7 +284,7 @@ config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
- def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
@@ -276,6 +298,13 @@ config AS_HAS_LBT_EXTENSION
config AS_HAS_LVZ_EXTENSION
def_bool $(as-instr,hvcl 0)
+config CC_HAS_ANNOTATE_TABLEJUMP
+ def_bool $(cc-option,-mannotate-tablejump)
+
+config RUSTC_HAS_ANNOTATE_TABLEJUMP
+ depends on RUST
+ def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@@ -367,8 +396,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-
+ line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
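[Worked example of the corrected CMDLINE_EXTEND semantics, using hypothetical arguments: if the bootloader passes "root=/dev/sda1" and the built-in line is "console=ttyS0,115200", the effective command line becomes "root=/dev/sda1 console=ttyS0,115200", with the built-in part appended last.]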
@@ -385,6 +414,7 @@ endchoice
config BUILTIN_DTB
bool "Enable built-in dtb in kernel"
depends on OF
+ select GENERIC_BUILTIN_DTB
help
Some existing systems do not provide a canonical device tree to
the kernel at boot time. Let's provide a device tree table in the
@@ -425,14 +455,6 @@ config EFI_STUB
This kernel feature allows the kernel to be loaded directly by
EFI firmware without the use of a bootloader.
-config SCHED_SMT
- bool "SMT scheduler support"
- depends on SMP
- default y
- help
- Improves scheduler's performance when there are multiple
- threads in one physical core.
-
config SMP
bool "Multi-Processing support"
help
@@ -462,10 +484,10 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
+ int "Maximum number of CPUs (2-2048)"
+ range 2 2048
+ default "2048"
depends on SMP
- default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support.
@@ -473,7 +495,6 @@ config NR_CPUS
config NUMA
bool "NUMA Support"
select SMP
- select ACPI_NUMA if ACPI
help
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
support. This option improves performance on systems with more
@@ -532,10 +553,14 @@ config ARCH_STRICT_ALIGN
-mstrict-align build parameter to prevent unaligned accesses.
CPUs with h/w unaligned access support:
- Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+ Loongson-2K2000/2K3000 and all of Loongson-3 series processors
+ based on LoongArch.
CPUs without h/w unaligned access support:
- Loongson-2K500/2K1000.
+ Loongson-2K0300/2K0500/2K1000.
+
+ If you want to make sure whether to support unaligned memory access
+ on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
This option is enabled by default to make the kernel be able to run
on all LoongArch systems. But you can disable it manually if you want
@@ -594,9 +619,22 @@ config CPU_HAS_PREFETCH
config ARCH_SUPPORTS_KEXEC
def_bool y
+config ARCH_SUPPORTS_KEXEC_FILE
+ def_bool 64BIT
+
+config ARCH_SELECTS_KEXEC_FILE
+ def_bool 64BIT
+ depends on KEXEC_FILE
+ select KEXEC_ELF
+ select RELOCATABLE
+ select HAVE_IMA_KEXEC if IMA
+
config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
+config ARCH_DEFAULT_CRASH_DUMP
+ def_bool y
+
config ARCH_SELECTS_CRASH_DUMP
def_bool y
depends on CRASH_DUMP
@@ -607,6 +645,7 @@ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
config RELOCATABLE
bool "Relocatable kernel"
+ select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required, so as to relocate
@@ -649,6 +688,17 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
+config PARAVIRT_TIME_ACCOUNTING
+ bool "Paravirtual steal time accounting"
+ depends on PARAVIRT
+ help
+ Select this option to enable fine granularity task steal time
+ accounting. Time spent executing other tasks in parallel with
+ the current vCPU is discounted from the vCPU power. To account for
+ that, there can be a small performance impact.
+
+ If in doubt, say N here.
+
endmenu
config ARCH_SELECT_MEMORY_MODEL
@@ -699,6 +749,7 @@ config ARCH_HIBERNATION_POSSIBLE
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
+source "drivers/cpufreq/Kconfig"
endmenu
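[The ARCH_STRICT_ALIGN help text above tells readers to check bit 20 (UAL) of the CPUCFG1 register. A minimal sketch of that probe in C, assuming a LoongArch target where the CPUCFG instruction is usable from user space; the function names below are made up for illustration, only the cpucfg instruction and the bit position come from the help text:

#include <stdio.h>

/* Read LoongArch configuration word 'word' via the CPUCFG instruction. */
static inline unsigned int read_cpucfg(unsigned int word)
{
	unsigned int val;

	__asm__ volatile("cpucfg %0, %1" : "=r"(val) : "r"(word));
	return val;
}

int main(void)
{
	/* CPUCFG1 bit 20 (UAL) flags h/w unaligned memory access support. */
	unsigned int ual = (read_cpucfg(1) >> 20) & 1;

	printf("unaligned access in hardware: %ssupported\n", ual ? "" : "not ");
	return 0;
}]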
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 8674e7e24c4a..96ca1a688984 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -59,7 +59,7 @@ endif
ifdef CONFIG_64BIT
ld-emul = $(64bit-emul)
-cflags-y += -mabi=lp64s
+cflags-y += -mabi=lp64s -mcmodel=normal
endif
cflags-y += -pipe $(CC_FLAGS_NO_FPU)
@@ -101,11 +101,27 @@ KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
ifdef CONFIG_OBJTOOL
-KBUILD_CFLAGS += -fno-jump-tables
+ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_CFLAGS += -mannotate-tablejump
+else
+KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
+endif
+ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
+else
+KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers
+endif
+ifdef CONFIG_LTO_CLANG
+# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
+# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
+# be passed via '-mllvm' to ld.lld.
+KBUILD_LDFLAGS += $(call ld-option,-mllvm --loongarch-annotate-tablejump)
+endif
endif
-KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat
-KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
+KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small
+KBUILD_RUSTFLAGS_KERNEL += -Zdirect-access-external-data=yes
+KBUILD_RUSTFLAGS_MODULE += -Zdirect-access-external-data=no
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
@@ -113,7 +129,7 @@ KBUILD_RUSTFLAGS_KERNEL += -Crelocation-model=pie
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
endif
-cflags-y += $(call cc-option, -mno-check-zero-division)
+cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference)
ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
@@ -176,11 +192,14 @@ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
install:
- $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/$(image-name-y)-$(KERNELRELEASE)
- $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
- $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+ $(call cmd,install)
define archhelp
- echo ' install - install kernel into $(INSTALL_PATH)'
+ echo ' vmlinux.elf - Uncompressed ELF kernel image (arch/loongarch/boot/vmlinux.elf)'
+ echo ' vmlinux.efi - Uncompressed EFI kernel image (arch/loongarch/boot/vmlinux.efi)'
+ echo ' vmlinuz.efi - GZIP/ZSTD-compressed EFI kernel image (arch/loongarch/boot/vmlinuz.efi)'
+ echo ' Default when CONFIG_EFI_ZBOOT=y'
+ echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or install.sh to $$(INSTALL_PATH)'
echo
endef
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
index 747d0c3f6389..15d5e14fe418 100644
--- a/arch/loongarch/boot/dts/Makefile
+++ b/arch/loongarch/boot/dts/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
index a34734a6c3ce..018ed904352a 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -41,6 +41,15 @@
};
};
+&apbdma3 {
+ status = "okay";
+};
+
+&mmc0 {
+ status = "okay";
+ bus-width = <4>;
+};
+
&gmac0 {
status = "okay";
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 3b38ff8853a7..357de4ca7555 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -104,7 +104,7 @@
status = "disabled";
};
- dma-controller@1fe10c20 {
+ apbdma2: dma-controller@1fe10c20 {
compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
reg = <0 0x1fe10c20 0 0x8>;
interrupt-parent = <&eiointc>;
@@ -114,7 +114,7 @@
status = "disabled";
};
- dma-controller@1fe10c30 {
+ apbdma3: dma-controller@1fe10c30 {
compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
reg = <0 0x1fe10c30 0 0x8>;
interrupt-parent = <&eiointc>;
@@ -169,6 +169,166 @@
interrupts = <3>;
};
+ pwm@1ff5c000 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c000 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c010 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c010 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c020 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c020 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c030 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c030 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c040 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c040 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c050 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c050 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c060 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c060 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c070 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c070 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c080 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c080 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c090 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c090 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0a0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0a0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0b0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0b0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0c0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0c0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0d0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0d0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0e0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0e0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0f0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0f0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
gmac0: ethernet@1f020000 {
compatible = "snps,dwmac-3.70a";
reg = <0x0 0x1f020000 0x0 0x10000>;
@@ -220,7 +380,7 @@
};
uart0: serial@1ff40800 {
- compatible = "ns16550a";
+ compatible = "loongson,ls2k0500-uart", "ns16550a";
reg = <0x0 0x1ff40800 0x0 0x10>;
clock-frequency = <100000000>;
interrupt-parent = <&eiointc>;
@@ -277,6 +437,30 @@
status = "disabled";
};
+ mmc0: mmc@1ff64000 {
+ compatible = "loongson,ls2k0500-mmc";
+ reg = <0 0x1ff64000 0 0x2000>,
+ <0 0x1fe10100 0 0x4>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <57>;
+ dmas = <&apbdma3 0>;
+ dma-names = "rx-tx";
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ status = "disabled";
+ };
+
+ mmc@1ff66000 {
+ compatible = "loongson,ls2k0500-mmc";
+ reg = <0 0x1ff66000 0 0x2000>,
+ <0 0x1fe10100 0 0x4>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <58>;
+ dmas = <&apbdma2 0>;
+ dma-names = "rx-tx";
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ status = "disabled";
+ };
+
pmc: power-management@1ff6c000 {
compatible = "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x1ff6c000 0x0 0x58>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 23cf26cc3e5f..d9a452ada5d7 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include "dt-bindings/thermal/thermal.h"
#include "loongson-2k1000.dtsi"
/ {
@@ -38,6 +39,26 @@
linux,cma-default;
};
};
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <255 153 85 25>;
+ pwms = <&pwm1 0 100000 0>;
+ #cooling-cells = <2>;
+ };
+};
+
+&apbdma1 {
+ status = "okay";
+};
+
+&mmc {
+ status = "okay";
+
+ pinctrl-0 = <&sdio_pins_default>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ cd-gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
};
&gmac0 {
@@ -90,10 +111,21 @@
#address-cells = <1>;
#size-cells = <0>;
- spidev@0 {
- compatible = "rohm,dh2228fv";
- spi-max-frequency = <100000000>;
- reg = <0>;
+};
+
+&pwm1 {
+ status = "okay";
+
+ pinctrl-0 = <&pwm1_pins_default>;
+ pinctrl-names = "default";
+};
+
+&cpu_thermal {
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
};
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 92180140eb56..60ab425f793f 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -68,7 +68,7 @@
};
thermal-zones {
- cpu-thermal {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tsensor 0>;
@@ -187,14 +187,14 @@
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
- <>,
- <26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 IRQ_TYPE_NONE>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_NONE>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
<26 IRQ_TYPE_LEVEL_HIGH>,
@@ -209,13 +209,13 @@
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
- <>,
+ <0 IRQ_TYPE_NONE>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
- <>,
- <>,
+ <0 IRQ_TYPE_NONE>,
+ <0 IRQ_TYPE_NONE>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
<27 IRQ_TYPE_LEVEL_HIGH>,
@@ -256,7 +256,7 @@
status = "disabled";
};
- dma-controller@1fe00c10 {
+ apbdma1: dma-controller@1fe00c10 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c10 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -266,7 +266,7 @@
status = "disabled";
};
- dma-controller@1fe00c20 {
+ apbdma2: dma-controller@1fe00c20 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c20 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -276,7 +276,7 @@
status = "disabled";
};
- dma-controller@1fe00c30 {
+ apbdma3: dma-controller@1fe00c30 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x0 0x1fe00c30 0x0 0x8>;
interrupt-parent = <&liointc1>;
@@ -297,7 +297,7 @@
};
uart0: serial@1fe20000 {
- compatible = "ns16550a";
+ compatible = "loongson,ls2k1000-uart", "loongson,ls2k0500-uart", "ns16550a";
reg = <0x0 0x1fe20000 0x0 0x10>;
clock-frequency = <125000000>;
interrupt-parent = <&liointc0>;
@@ -322,6 +322,46 @@
status = "disabled";
};
+ pwm@1fe22000 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22000 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@1fe22010 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22010 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1fe22020 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22020 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1fe22030 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22030 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pmc: power-management@1fe27000 {
compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x1fe27000 0x0 0x58>;
@@ -352,6 +392,31 @@
status = "disabled";
};
+ i2s: i2s@1fe2d000 {
+ compatible = "loongson,ls2k1000-i2s";
+ reg = <0 0x1fe2d000 0 0x14>,
+ <0 0x1fe00438 0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ dmas = <&apbdma2 0>, <&apbdma3 0>;
+ dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ mmc: mmc@1fe2c000 {
+ compatible = "loongson,ls2k1000-mmc";
+ reg = <0 0x1fe2c000 0 0x68>,
+ <0 0x1fe00438 0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ dmas = <&apbdma1 0>;
+ dma-names = "rx-tx";
+ status = "disabled";
+ };
+
spi0: spi@1fff0220 {
compatible = "loongson,ls2k1000-spi";
reg = <0x0 0x1fff0220 0x0 0x10>;
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index ea9e6985d0e9..3c6b12220386 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -39,6 +39,16 @@
};
};
+&emmc {
+ status = "okay";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
+};
+
&sata {
status = "okay";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index 0953c5707825..6c77b86ee06c 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -165,6 +165,66 @@
interrupt-parent = <&eiointc>;
};
+ pwm@100a0000 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0000 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0100 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0100 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0200 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0200 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0300 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0300 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0400 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0400 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0500 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0500 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
rtc0: rtc@100d0100 {
compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc";
reg = <0x0 0x100d0100 0x0 0x100>;
@@ -173,8 +233,24 @@
status = "disabled";
};
+ i2c@1fe00120 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe00120 0x0 0x8>;
+ interrupt-parent = <&liointc>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c@1fe00130 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe00130 0x0 0x8>;
+ interrupt-parent = <&liointc>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
uart0: serial@1fe001e0 {
- compatible = "ns16550a";
+ compatible = "loongson,ls2k2000-uart", "loongson,ls2k1500-uart", "ns16550a";
reg = <0x0 0x1fe001e0 0x0 0x10>;
clock-frequency = <100000000>;
interrupt-parent = <&liointc>;
@@ -183,6 +259,24 @@
status = "disabled";
};
+ emmc: mmc@79990000 {
+ compatible = "loongson,ls2k2000-mmc";
+ reg = <0x0 0x79990000 0x0 0x1000>;
+ interrupt-parent = <&pic>;
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_EMMC_CLK>;
+ status = "disabled";
+ };
+
+ mmc@79991000 {
+ compatible = "loongson,ls2k2000-mmc";
+ reg = <0x0 0x79991000 0x0 0x1000>;
+ interrupt-parent = <&pic>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_EMMC_CLK>;
+ status = "disabled";
+ };
+
pcie@1a000000 {
compatible = "loongson,ls2k-pci";
reg = <0x0 0x1a000000 0x0 0x02000000>,
@@ -243,9 +337,11 @@
status = "disabled";
};
- hda@7,0 {
+ i2s@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
- interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <78 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
interrupt-parent = <&pic>;
status = "disabled";
};
diff --git a/arch/loongarch/boot/install.sh b/arch/loongarch/boot/install.sh
new file mode 100755
index 000000000000..daac197d3315
--- /dev/null
+++ b/arch/loongarch/boot/install.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the LoongArch Linux port
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+
+set -e
+
+case "${2##*/}" in
+vmlinux.elf)
+ echo "Installing uncompressed vmlinux.elf kernel"
+ base=vmlinux
+ ;;
+vmlinux.efi)
+ echo "Installing uncompressed vmlinux.efi kernel"
+ base=vmlinux
+ ;;
+vmlinuz.efi)
+ echo "Installing gzip/zstd compressed vmlinuz.efi kernel"
+ base=vmlinuz
+ ;;
+*)
+ echo "Warning: Unexpected kernel type"
+ exit 1
+ ;;
+esac
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
+
+# Install kernel config file
+if [ -f $4/config-$1 ]; then
+ mv $4/config-$1 $4/config-$1.old
+fi
+cp .config $4/config-$1
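[With this script in place, the install target in the Makefile hunk above ($(call cmd,install)) first tries ~/bin/$(INSTALLKERNEL), then /sbin/$(INSTALLKERNEL), and falls back to this script with the four arguments listed in its header. A hypothetical direct invocation, assuming a built vmlinuz.efi, kernel release 6.13.0 and /boot as the install path:

	sh arch/loongarch/boot/install.sh 6.13.0 arch/loongarch/boot/vmlinuz.efi System.map /boot

which should produce /boot/vmlinuz-6.13.0, /boot/System.map-6.13.0 and /boot/config-6.13.0, rotating any pre-existing copies to *.old.]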
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index b4252c357c8e..50e1304e7a6f 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_ZSTD=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
@@ -23,9 +24,9 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -44,6 +45,7 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_LOONGARCH=y
CONFIG_64BIT=y
@@ -54,7 +56,7 @@ CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=2048
CONFIG_NUMA=y
CONFIG_CPU_HAS_FPU=y
CONFIG_CPU_HAS_LSX=y
@@ -70,6 +72,14 @@ CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_PCI_SLOT=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_BGRT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_LOONGSON3_CPUFREQ=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -78,6 +88,9 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_MODULE_COMPRESS=y
+CONFIG_MODULE_COMPRESS_ZSTD=y
+CONFIG_MODULE_DECOMPRESS=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
@@ -85,6 +98,8 @@ CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_FC_APPID=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_UNIXWARE_DISKLABEL=y
@@ -92,15 +107,15 @@ CONFIG_CMDLINE_PARTITION=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
-CONFIG_ZPOOL=y
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
-CONFIG_ZBUD=y
-CONFIG_Z3FOLD=y
-CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
+CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
@@ -140,7 +155,16 @@ CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BBR=m
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
@@ -210,7 +234,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -318,15 +341,33 @@ CONFIG_LLC2=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_FQ_PIE=m
CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_DEFAULT=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=m
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -394,6 +435,7 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_SYSFB_SIMPLEFB=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
@@ -407,6 +449,11 @@ CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_NAND_LOONGSON=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_BLOCK=y
CONFIG_PARPORT=y
@@ -414,7 +461,16 @@ CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_LZ4HC=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_DEFLATE=y
+CONFIG_ZRAM_BACKEND_842=y
+CONFIG_ZRAM_BACKEND_LZO=y
CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_MULTI_COMP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
@@ -434,6 +490,9 @@ CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_TCP=m
CONFIG_EEPROM_AT24=m
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_MMIO=m
+CONFIG_PVPANIC_PCI=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
@@ -471,12 +530,10 @@ CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MD_MULTIPATH=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
@@ -490,6 +547,16 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
+CONFIG_DM_INIT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_DM_VDO=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -501,6 +568,13 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=y
CONFIG_WIREGUARD=m
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
@@ -535,6 +609,11 @@ CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IXGBE=y
+CONFIG_I40E=y
+CONFIG_ICE=y
+CONFIG_FM10K=y
+CONFIG_IGC=y
+CONFIG_IDPF=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -581,12 +660,14 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
CONFIG_USB_NET_CDC_EEM=m
CONFIG_USB_NET_HUAWEI_CDC_NCM=m
CONFIG_USB_NET_CDC_MBIM=m
# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_RNDIS_HOST=m
# CONFIG_USB_BELKIN is not set
# CONFIG_USB_ARMLINUX is not set
# CONFIG_USB_NET_ZAURUS is not set
@@ -595,10 +676,11 @@ CONFIG_ATH9K_HTC=m
CONFIG_IWLWIFI=m
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
-CONFIG_HOSTAP=m
CONFIG_MT7601U=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
CONFIG_RTL8192CE=m
CONFIG_RTL8192SE=m
CONFIG_RTL8192DE=m
@@ -608,22 +690,37 @@ CONFIG_RTL8188EE=m
CONFIG_RTL8192EE=m
CONFIG_RTL8821AE=m
CONFIG_RTL8192CU=m
+CONFIG_RTL8192DU=m
# CONFIG_RTLWIFI_DEBUG is not set
CONFIG_RTL8XXXU=m
CONFIG_RTW88=m
CONFIG_RTW88_8822BE=m
+CONFIG_RTW88_8822BU=m
CONFIG_RTW88_8822CE=m
+CONFIG_RTW88_8822CU=m
CONFIG_RTW88_8723DE=m
+CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
+CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
+CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
+CONFIG_RTW89_8852BE=m
+CONFIG_RTW89_8852BTE=m
CONFIG_RTW89_8852CE=m
+CONFIG_RTW89_8922AE=m
CONFIG_ZD1211RW=m
-CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_USB4_NET=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+CONFIG_KEYBOARD_MATRIX=m
CONFIG_KEYBOARD_XTKBD=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_PS2_SENTELIC=y
@@ -648,10 +745,16 @@ CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
+CONFIG_IPMI_LS2K=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_LOONGSON=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PCI=y
CONFIG_I2C_GPIO=y
CONFIG_I2C_LS2X=y
CONFIG_SPI=y
@@ -662,6 +765,10 @@ CONFIG_PINCTRL_LOONGSON2=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_LOONGSON=y
CONFIG_GPIO_LOONGSON_64BIT=y
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCA9570=m
+CONFIG_GPIO_PCF857X=m
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_RESTART=y
CONFIG_POWER_RESET_SYSCON=y
@@ -672,6 +779,7 @@ CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m
CONFIG_LOONGSON2_THERMAL=m
+CONFIG_MFD_LOONGSON_SE=m
CONFIG_RC_CORE=m
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
@@ -692,6 +800,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -702,9 +811,11 @@ CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
+CONFIG_DRM_SIMPLEDRM=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -722,17 +833,44 @@ CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=y
+CONFIG_SND_HDA_CODEC_HDMI_ATI=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_AUDIO_MIDI_V2=y
CONFIG_SND_SOC=m
CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_ES8311=m
+CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8323=m
+CONFIG_SND_SOC_ES8326=m
+CONFIG_SND_SOC_ES8328_I2C=m
+CONFIG_SND_SOC_ES8328_SPI=m
+CONFIG_SND_SOC_UDA1334=m
+CONFIG_SND_SOC_UDA1342=m
CONFIG_SND_VIRTIO=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
CONFIG_HID_A4TECH=m
CONFIG_HID_CHERRY=m
+CONFIG_HID_ELAN=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_LOGITECH_FF=y
@@ -741,7 +879,11 @@ CONFIG_LOGIG940_FF=y
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_SUNPLUS=m
+CONFIG_HID_WACOM=m
CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_MON=y
@@ -771,12 +913,16 @@ CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
+CONFIG_MMC=y
+CONFIG_MMC_LOONGSON2=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
CONFIG_DMADEVICES=y
-CONFIG_LS2X_APB_DMA=y
+CONFIG_LOONGSON2_APB_DMA=y
CONFIG_UDMABUF=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_HEAPS_SYSTEM=y
@@ -829,19 +975,22 @@ CONFIG_NTB_SWITCHTEC=m
CONFIG_NTB_PERF=m
CONFIG_NTB_TRANSPORT=m
CONFIG_PWM=y
+CONFIG_PWM_LOONGSON=y
CONFIG_GENERIC_PHY=y
CONFIG_USB4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_GFS2_FS=m
@@ -853,6 +1002,9 @@ CONFIG_F2FS_FS=m
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_CHECK_FS=y
CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
@@ -905,16 +1057,13 @@ CONFIG_SQUASHFS_ZSTD=y
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_PSTORE=m
-CONFIG_PSTORE_LZO_COMPRESS=m
-CONFIG_PSTORE_LZ4_COMPRESS=m
-CONFIG_PSTORE_LZ4HC_COMPRESS=m
-CONFIG_PSTORE_842_COMPRESS=y
-CONFIG_PSTORE_ZSTD_COMPRESS=y
-CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
-CONFIG_SYSV_FS=m
+CONFIG_PSTORE_COMPRESS=y
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ZIP_ZSTD=y
+CONFIG_EROFS_FS_ONDEMAND=y
CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
@@ -933,9 +1082,12 @@ CONFIG_CEPH_FS_SECURITY_LABEL=y
CONFIG_CIFS=m
# CONFIG_CIFS_DEBUG is not set
CONFIG_9P_FS=y
+CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DLM=m
CONFIG_KEY_DH_OPERATIONS=y
@@ -946,7 +1098,7 @@ CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_ANUBIS=m
@@ -956,10 +1108,11 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
@@ -970,8 +1123,8 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_DEV_LOONGSON_RNG=m
CONFIG_DMA_CMA=y
CONFIG_DMA_NUMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
diff --git a/arch/loongarch/crypto/Kconfig b/arch/loongarch/crypto/Kconfig
index 200a6e8b43b1..a0270b3e5b30 100644
--- a/arch/loongarch/crypto/Kconfig
+++ b/arch/loongarch/crypto/Kconfig
@@ -2,13 +2,4 @@
menu "Accelerated Cryptographic Algorithms for CPU (loongarch)"
-config CRYPTO_CRC32_LOONGARCH
- tristate "CRC32c and CRC32"
- select CRC32
- select CRYPTO_HASH
- help
- CRC32c and CRC32 CRC algorithms
-
- Architecture: LoongArch with CRC32 instructions
-
endmenu
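[The dedicated crypto-API CRC32 driver is removed here, and the loongson3_defconfig hunk above likewise drops CONFIG_CRYPTO_CRC32_LOONGARCH. Presumably kernel users now reach the LoongArch crc.w/crcc.w instructions through the generic CRC32 library interface rather than a crypto shash. A minimal sketch of such a caller, assuming a kernel where linux/crc32.h exposes both flavours and dispatches to the arch-optimized backend:

#include <linux/types.h>
#include <linux/crc32.h>

/* CRC-32 (IEEE 802.3 polynomial), little-endian bit order, usual ~0 seed. */
static u32 example_crc32(const u8 *buf, size_t len)
{
	return crc32_le(~0U, buf, len);
}

/* CRC-32C (Castagnoli polynomial), as used by e.g. iSCSI and ext4. */
static u32 example_crc32c(const u8 *buf, size_t len)
{
	return crc32c(~0U, buf, len);
}]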
diff --git a/arch/loongarch/crypto/Makefile b/arch/loongarch/crypto/Makefile
index d22613d27ce9..ba83755dde2b 100644
--- a/arch/loongarch/crypto/Makefile
+++ b/arch/loongarch/crypto/Makefile
@@ -2,5 +2,3 @@
#
# Makefile for LoongArch crypto files..
#
-
-obj-$(CONFIG_CRYPTO_CRC32_LOONGARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
deleted file mode 100644
index 3eebea3a7b47..000000000000
--- a/arch/loongarch/crypto/crc32-loongarch.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * crc32.c - CRC32 and CRC32C using LoongArch crc* instructions
- *
- * Module based on mips/crypto/crc32-mips.c
- *
- * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
- * Copyright (C) 2018 MIPS Tech, LLC
- * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
- */
-
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-
-#include <asm/cpu-features.h>
-#include <asm/unaligned.h>
-
-#define _CRC32(crc, value, size, type) \
-do { \
- __asm__ __volatile__( \
- #type ".w." #size ".w" " %0, %1, %0\n\t"\
- : "+r" (crc) \
- : "r" (value) \
- : "memory"); \
-} while (0)
-
-#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
-#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
-
-static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32(crc, value, b);
- }
-
- return crc;
-}
-
-static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32C(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32C(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32C(crc, value, b);
- }
-
- return crc;
-}
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-struct chksum_ctx {
- u32 key;
-};
-
-struct chksum_desc_ctx {
- u32 crc;
-};
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = mctx->key;
-
- return 0;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set the seed.
- */
-static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(mctx->key))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksumc_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32c_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(ctx->crc, out);
- return 0;
-}
-
-static int chksumc_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(~ctx->crc, out);
- return 0;
-}
-
-static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(crc32_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(~crc32c_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksumc_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksumc_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksum_finup(mctx->key, data, length, out);
-}
-
-static int chksumc_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksumc_finup(mctx->key, data, length, out);
-}
-
-static int chksum_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- return 0;
-}
-
-static int chksumc_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = ~0;
- return 0;
-}
-
-static struct shash_alg crc32_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksum_cra_init,
- }
-};
-
-static struct shash_alg crc32c_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksumc_update,
- .final = chksumc_final,
- .finup = chksumc_finup,
- .digest = chksumc_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksumc_cra_init,
- }
-};
-
-static int __init crc32_mod_init(void)
-{
- int err;
-
- if (!cpu_has(CPU_FEATURE_CRC32))
- return 0;
-
- err = crypto_register_shash(&crc32_alg);
- if (err)
- return err;
-
- err = crypto_register_shash(&crc32c_alg);
- if (err)
- return err;
-
- return 0;
-}
-
-static void __exit crc32_mod_exit(void)
-{
- if (!cpu_has(CPU_FEATURE_CRC32))
- return;
-
- crypto_unregister_shash(&crc32_alg);
- crypto_unregister_shash(&crc32c_alg);
-}
-
-module_init(crc32_mod_init);
-module_exit(crc32_mod_exit);
-
-MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
-MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
-MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
-MODULE_LICENSE("GPL v2");
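The deleted driver folded input at descending widths (8/4/2/1 bytes) through the crc.w.{d,w,h,b}.w and crcc.w.*.w instructions; as the shash ops above show, crc32 ran with a 0 seed and no output inversion, while crc32c seeded with ~0 and inverted the result. For reference, a minimal bitwise sketch of the same two reflected CRCs that the generic software fallback now covers (illustration only, not the kernel's table-driven lib/crc32 code):

#include <stddef.h>
#include <stdint.h>

/*
 * Bitwise reflected CRC update. poly is 0xEDB88320 for crc32 (IEEE) or
 * 0x82F63B78 for crc32c (Castagnoli), the two polynomials implemented
 * in hardware by the crc.w.*.w / crcc.w.*.w instructions.
 */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p,
				 size_t len, uint32_t poly)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (poly & -(crc & 1u));
	}
	return crc;
}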
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index c862672ed953..b04d2cef935f 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,28 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_64.h
generated-y += orc_hash.h
-generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
-generic-y += qspinlock.h
-generic-y += rwsem.h
-generic-y += segment.h
generic-y += user.h
-generic-y += stat.h
-generic-y += fcntl.h
generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
+generic-y += mmzone.h
generic-y += statfs.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += poll.h
-generic-y += param.h
-generic-y += posix_types.h
-generic-y += resource.h
+generic-y += text-patching.h
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
index 52f298f7293b..483c955f2ae5 100644
--- a/arch/loongarch/include/asm/acenv.h
+++ b/arch/loongarch/include/asm/acenv.h
@@ -10,9 +10,8 @@
#ifndef _ASM_LOONGARCH_ACENV_H
#define _ASM_LOONGARCH_ACENV_H
-/*
- * This header is required by ACPI core, but we have nothing to fill in
- * right now. Will be updated later when needed.
- */
+#ifdef CONFIG_ARCH_STRICT_ALIGN
+#define ACPI_MISALIGNMENT_NOT_SUPPORTED
+#endif /* CONFIG_ARCH_STRICT_ALIGN */
#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 313f66f7913a..7376840fa9f7 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -33,7 +33,7 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
-#define MAX_CORE_PIC 256
+#define MAX_CORE_PIC 2048
extern struct list_head acpi_wakeup_device_list;
extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 7bd47d65bf7a..e739dbc6329d 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -18,12 +18,12 @@
/*
* This gives the physical RAM offset.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef PHYS_OFFSET
#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifndef IO_BASE
#define IO_BASE CSR_DMW0_BASE
@@ -37,6 +37,10 @@ extern unsigned long vm_map_base;
#define UNCACHE_BASE CSR_DMW0_BASE
#endif
+#ifndef WRITECOMBINE_BASE
+#define WRITECOMBINE_BASE CSR_DMW2_BASE
+#endif
+
#define DMW_PABITS 48
#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
@@ -62,7 +66,7 @@ extern unsigned long vm_map_base;
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
@@ -81,7 +85,7 @@ extern unsigned long vm_map_base;
/*
* 32/64-bit LoongArch address spaces
*/
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ACAST32_
#define _ACAST64_
#else
diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
index ff3d10ac393f..7dc29bd9b2f0 100644
--- a/arch/loongarch/include/asm/alternative-asm.h
+++ b/arch/loongarch/include/asm/alternative-asm.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_ASM_H
#define _ASM_ALTERNATIVE_ASM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/asm.h>
@@ -77,6 +77,6 @@
.previous
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h
index cee7b29785ab..b5bae21fb3c8 100644
--- a/arch/loongarch/include/asm/alternative.h
+++ b/arch/loongarch/include/asm/alternative.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_H
#define _ASM_ALTERNATIVE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stddef.h>
@@ -106,6 +106,6 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_H */
diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h
index df05005f2b80..d60bdf2e6377 100644
--- a/arch/loongarch/include/asm/asm-extable.h
+++ b/arch/loongarch/include/asm/asm-extable.h
@@ -7,7 +7,7 @@
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_BPF 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
@@ -22,7 +22,7 @@
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#include <linux/bits.h>
#include <linux/stringify.h>
@@ -60,6 +60,6 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ASM_EXTABLE_H */
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index 51f224bcfc65..704066b4f736 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b);
__int128_t __ashrti3(__int128_t a, int b);
__int128_t __lshrti3(__int128_t a, int b);
#endif
+
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs);
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg);
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
index f591b3245def..f018d26fc995 100644
--- a/arch/loongarch/include/asm/asm.h
+++ b/arch/loongarch/include/asm/asm.h
@@ -110,7 +110,7 @@
#define LONG_SRA srai.w
#define LONG_SRAV sra.w
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define LONG .word
#endif
#define LONGSIZE 4
@@ -131,7 +131,7 @@
#define LONG_SRA srai.d
#define LONG_SRAV sra.d
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define LONG .dword
#endif
#define LONGSIZE 8
@@ -158,7 +158,7 @@
#define PTR_SCALESHIFT 2
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define PTR .word
#endif
#define PTRSIZE 4
@@ -181,7 +181,7 @@
#define PTR_SCALESHIFT 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define PTR .dword
#endif
#define PTRSIZE 8
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 655db7d7a427..8d7f501b0a12 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -609,6 +609,7 @@
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
+ .p2align 3
.dword 766b
.dword \sym
.popsection
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 99af8b3160a8..c86f0ab922ec 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -15,6 +15,7 @@
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
+#define __AMOR "amor.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
@@ -22,6 +23,7 @@
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
+#define __AMOR "amor.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
index 6d5846dd075c..7657e016233f 100644
--- a/arch/loongarch/include/asm/bootinfo.h
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -26,6 +26,10 @@ struct loongson_board_info {
#define NR_WORDS DIV_ROUND_UP(NR_CPUS, BITS_PER_LONG)
+/*
+ * The "core" of cores_per_node and cores_per_package stands for a
+ * logical core, which means in an SMT system it stands for a thread.
+ */
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
index 08388876ade4..d090a5bec5eb 100644
--- a/arch/loongarch/include/asm/bug.h
+++ b/arch/loongarch/include/asm/bug.h
@@ -4,13 +4,14 @@
#include <asm/break.h>
#include <linux/stringify.h>
+#include <linux/objtool.h>
#ifndef CONFIG_DEBUG_BUGVERBOSE
#define _BUGVERBOSE_LOCATION(file, line)
#else
#define __BUGVERBOSE_LOCATION(file, line) \
.pushsection .rodata.str, "aMS", @progbits, 1; \
- 10002: .string file; \
+ 10002: .ascii file "\0"; \
.popsection; \
\
.long 10002b - .; \
@@ -19,39 +20,38 @@
#endif
#ifndef CONFIG_GENERIC_BUG
-#define __BUG_ENTRY(flags)
+#define __BUG_ENTRY(cond_str, flags)
#else
-#define __BUG_ENTRY(flags) \
+#define __BUG_ENTRY(cond_str, flags) \
.pushsection __bug_table, "aw"; \
.align 2; \
10000: .long 10001f - .; \
- _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
- .short flags; \
+ _BUGVERBOSE_LOCATION(WARN_CONDITION_STR(cond_str) __FILE__, __LINE__) \
+ .short flags; \
.popsection; \
10001:
#endif
-#define ASM_BUG_FLAGS(flags) \
- __BUG_ENTRY(flags) \
- break BRK_BUG
+#define ASM_BUG_FLAGS(cond_str, flags) \
+ __BUG_ENTRY(cond_str, flags) \
+ break BRK_BUG;
-#define ASM_BUG() ASM_BUG_FLAGS(0)
+#define ASM_BUG() ASM_BUG_FLAGS("", 0)
-#define __BUG_FLAGS(flags) \
- asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)));
+#define __BUG_FLAGS(cond_str, flags, extra) \
+ asm_inline volatile (__stringify(ASM_BUG_FLAGS(cond_str, flags)) extra);
-#define __WARN_FLAGS(flags) \
+#define __WARN_FLAGS(cond_str, flags) \
do { \
instrumentation_begin(); \
- __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
- annotate_reachable(); \
+ __BUG_FLAGS(cond_str, BUGFLAG_WARNING|(flags), ANNOTATE_REACHABLE(10001b));\
instrumentation_end(); \
} while (0)
#define BUG() \
do { \
instrumentation_begin(); \
- __BUG_FLAGS(0); \
+ __BUG_FLAGS("", 0, ""); \
unreachable(); \
} while (0)
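With CONFIG_GENERIC_BUG and CONFIG_DEBUG_BUGVERBOSE, each __BUG_ENTRY emits one record into __bug_table whose layout matches the generic struct bug_entry built with relative pointers. A C view of what the directives above produce, as a sketch (field names follow include/linux/bug.h):

struct bug_entry {
	signed int bug_addr_disp;	/* .long 10001f - . : the break insn, PC-relative */
	signed int file_disp;		/* .long 10002b - . : file string, PC-relative */
	unsigned short line;		/* .short line from _BUGVERBOSE_LOCATION */
	unsigned short flags;		/* .short flags, e.g. BUGFLAG_WARNING */
};

The PC-relative displacements keep the table position-independent, so it needs no relocations at load time.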
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index 2eafe6a6aca8..bd5f0457ad21 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -51,6 +51,7 @@
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
@@ -65,5 +66,9 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
+#define cpu_has_msgint cpu_opt(LOONGARCH_CPU_MSGINT)
+#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
+#define cpu_has_redirectint cpu_opt(LOONGARCH_CPU_REDIRECTINT)
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
index 900589cb159d..7f5bc0ad9d50 100644
--- a/arch/loongarch/include/asm/cpu-info.h
+++ b/arch/loongarch/include/asm/cpu-info.h
@@ -57,6 +57,7 @@ struct cpuinfo_loongarch {
int global_id; /* physical global thread number */
int vabits; /* Virtual Address size in bits */
int pabits; /* Physical Address size in bits */
+ int timerbits; /* Width of arch timer in bits */
unsigned int ksave_mask; /* Usable KSave mask. */
 unsigned int watch_dreg_count; /* Number of data breakpoints */
 unsigned int watch_ireg_count; /* Number of instruction breakpoints */
@@ -75,27 +76,6 @@ extern const char *__cpu_full_name[];
#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
-struct seq_file;
-struct notifier_block;
-
-extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
-extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
-
-#define proc_cpuinfo_notifier(fn, pri) \
-({ \
- static struct notifier_block fn##_nb = { \
- .notifier_call = fn, \
- .priority = pri \
- }; \
- \
- register_proc_cpuinfo_notifier(&fn##_nb); \
-})
-
-struct proc_cpuinfo_notifier_args {
- struct seq_file *m;
- unsigned long n;
-};
-
static inline bool cpus_are_siblings(int cpua, int cpub)
{
struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 48b9f7168bcc..f3efb00b6141 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -46,7 +46,7 @@
#define PRID_PRODUCT_MASK 0x0fff
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
enum cpu_type_enum {
CPU_UNKNOWN,
@@ -55,7 +55,28 @@ enum cpu_type_enum {
CPU_LAST
};
-#endif /* !__ASSEMBLY */
+static inline char *id_to_core_name(unsigned int id)
+{
+ if ((id & PRID_COMP_MASK) != PRID_COMP_LOONGSON)
+ return "Unknown";
+
+ switch (id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ return "LA132";
+ case PRID_SERIES_LA264:
+ return "LA264";
+ case PRID_SERIES_LA364:
+ return "LA364";
+ case PRID_SERIES_LA464:
+ return "LA464";
+ case PRID_SERIES_LA664:
+ return "LA664";
+ default:
+ return "Unknown";
+ }
+}
+
+#endif /* !__ASSEMBLER__ */
/*
* ISA Level encodings
@@ -87,18 +108,23 @@ enum cpu_type_enum {
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
-#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP 20 /* CPU has performance counter */
-#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
-#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
+#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
+#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP 21 /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
+#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
+#define CPU_FEATURE_MSGINT 29 /* CPU has MSG interrupt */
+#define CPU_FEATURE_AVECINT 30 /* CPU has AVEC interrupt */
+#define CPU_FEATURE_REDIRECTINT 31 /* CPU has interrupt remapping */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -114,6 +140,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
@@ -127,5 +154,9 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
+#define LOONGARCH_CPU_MSGINT BIT_ULL(CPU_FEATURE_MSGINT)
+#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
+#define LOONGARCH_CPU_REDIRECTINT BIT_ULL(CPU_FEATURE_REDIRECTINT)
#endif /* _ASM_CPU_H */
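A short usage sketch for the new id_to_core_name() helper; the assumption here, for illustration, is that the PRID is read from CPUCFG word 0, as the cpu probing code does:

unsigned int prid = read_cpucfg(LOONGARCH_CPUCFG0);	/* processor ID */

pr_info("CPU core: %s (PRID 0x%08x)\n", id_to_core_name(prid), prid);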
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
deleted file mode 100644
index 75ccd808a2af..000000000000
--- a/arch/loongarch/include/asm/dma-direct.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _LOONGARCH_DMA_DIRECT_H
-#define _LOONGARCH_DMA_DIRECT_H
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h
index 0fe2a098ded9..099132980dc9 100644
--- a/arch/loongarch/include/asm/entry-common.h
+++ b/arch/loongarch/include/asm/entry-common.h
@@ -2,12 +2,6 @@
#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
#define ARCH_LOONGARCH_ENTRY_COMMON_H
-#include <linux/sched.h>
-#include <linux/processor.h>
-
-static inline bool on_thread_stack(void)
-{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
-}
+#include <asm/stacktrace.h> /* For on_thread_stack() */
#endif
diff --git a/arch/loongarch/include/asm/fprobe.h b/arch/loongarch/include/asm/fprobe.h
new file mode 100644
index 000000000000..7af3b3126caf
--- /dev/null
+++ b/arch/loongarch/include/asm/fprobe.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_FPROBE_H
+#define _ASM_LOONGARCH_FPROBE_H
+
+/*
+ * Explicitly undef ARCH_DEFINE_ENCODE_FPROBE_HEADER, because LoongArch does
+ * not have enough fixed MSBs in kernel object addresses to encode the size
+ * of data in fprobe_header. Use the 2-entry encoding instead.
+ */
+#undef ARCH_DEFINE_ENCODE_FPROBE_HEADER
+
+#endif /* _ASM_LOONGARCH_FPROBE_H */
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
index 3177674228f8..45514f314664 100644
--- a/arch/loongarch/include/asm/fpu.h
+++ b/arch/loongarch/include/asm/fpu.h
@@ -22,22 +22,29 @@
struct sigcontext;
#define kernel_fpu_available() cpu_has_fpu
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
-
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
-
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
+
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
static inline void enable_lsx(void);
static inline void disable_lsx(void);
diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h
index de891c2c83d4..f4caaf764f9e 100644
--- a/arch/loongarch/include/asm/ftrace.h
+++ b/arch/loongarch/include/asm/ftrace.h
@@ -14,7 +14,7 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef CONFIG_DYNAMIC_FTRACE
@@ -28,7 +28,6 @@ struct dyn_ftrace;
struct dyn_arch_ftrace { };
#define ARCH_SUPPORTS_FTRACE_OPS 1
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ftrace_init_nop ftrace_init_nop
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
@@ -45,39 +44,28 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent);
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
struct ftrace_ops;
-struct ftrace_regs {
- struct pt_regs regs;
-};
+#include <linux/ftrace_regs.h>
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
- return &fregs->regs;
-}
-
-static __always_inline unsigned long
-ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
-{
- return instruction_pointer(&fregs->regs);
+ return &arch_ftrace_regs(fregs)->regs;
}
static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
{
- instruction_pointer_set(&fregs->regs, ip);
+ instruction_pointer_set(&arch_ftrace_regs(fregs)->regs, ip);
}
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(&(fregs)->regs, n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(&(fregs)->regs)
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(&(fregs)->regs)
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(&(fregs)->regs, ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(&(fregs)->regs)
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
+#undef ftrace_regs_get_frame_pointer
+#define ftrace_regs_get_frame_pointer(fregs) \
+ (arch_ftrace_regs(fregs)->regs.regs[22])
+
+static __always_inline unsigned long
+ftrace_regs_get_return_address(struct ftrace_regs *fregs)
+{
+ return *(unsigned long *)(arch_ftrace_regs(fregs)->regs.regs[1]);
+}
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -91,35 +79,13 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
}
#define arch_ftrace_set_direct_caller(fregs, addr) \
- __arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+ __arch_ftrace_set_direct_caller(&arch_ftrace_regs(fregs)->regs, addr)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- /* a0 - a1 */
- unsigned long regs[2];
-
- unsigned long fp;
- unsigned long __unused;
-};
-
-static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->regs[0];
-}
-
-static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->fp;
-}
-#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
-#endif
-
#endif /* _ASM_LOONGARCH_FTRACE_H */
diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h
index 996038da806d..af95b941f48b 100644
--- a/arch/loongarch/include/asm/gpr-num.h
+++ b/arch/loongarch/include/asm/gpr-num.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.equ .L__gpr_num_zero, 0
.irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
@@ -25,7 +25,7 @@
.equ .L__gpr_num_$s\num, 23 + \num
.endr
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
@@ -47,6 +47,6 @@
" .equ .L__gpr_num_$s\\num, 23 + \\num\n" \
" .endr\n" \
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_GPR_NUM_H */
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index d41138abcf26..10da8d6961cb 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -12,11 +12,13 @@
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
-#define NR_IPI 2
+#define NR_IPI 4
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNCTION,
+ IPI_IRQ_WORK,
+ IPI_CLEAR_VECTOR,
};
typedef struct {
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index aa44b3fe43dd..ab68b594f889 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -10,31 +10,23 @@
uint64_t pmd_to_entrylo(unsigned long pmd_val);
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr,
- unsigned long len)
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
{
- unsigned long task_size = STACK_TOP;
- struct hstate *h = hstate_file(file);
+ pte_t clear;
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- if (len > task_size)
- return -ENOMEM;
- if (task_size - len < addr)
- return -EINVAL;
- return 0;
+ pte_val(clear) = (unsigned long)invalid_pte_table;
+ set_pte_at(mm, addr, ptep, clear);
}
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
pte_val(clear) = (unsigned long)invalid_pte_table;
set_pte_at(mm, addr, ptep, clear);
@@ -46,8 +38,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
@@ -65,7 +58,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t pte,
int dirty)
{
- int changed = !pte_same(*ptep, pte);
+ int changed = !pte_same(ptep_get(ptep), pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index d78330916bd1..5faa97a87a9e 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -38,8 +38,8 @@ struct arch_hw_breakpoint {
* Limits.
* Changing these will require modifications to the register accessors.
*/
-#define LOONGARCH_MAX_BRP 8
-#define LOONGARCH_MAX_WRP 8
+#define LOONGARCH_MAX_BRP 14
+#define LOONGARCH_MAX_WRP 14
/* Virtual debug register bases. */
#define CSR_CFG_ADDR 0
@@ -134,13 +134,13 @@ static inline void hw_breakpoint_thread_switch(struct task_struct *next)
/* Determine number of BRP registers available. */
static inline int get_num_brps(void)
{
- return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
+ return csr_read32(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
}
/* Determine number of WRP registers available. */
static inline int get_num_wrps(void)
{
- return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
+ return csr_read32(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
}
#endif /* __KERNEL__ */
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
index af4f4e8fbd85..8156ffb67415 100644
--- a/arch/loongarch/include/asm/hw_irq.h
+++ b/arch/loongarch/include/asm/hw_irq.h
@@ -9,6 +9,8 @@
extern atomic_t irq_err_count;
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE
+
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
* machines, we'll see ...
diff --git a/arch/loongarch/include/asm/image.h b/arch/loongarch/include/asm/image.h
new file mode 100644
index 000000000000..cab981cdb72a
--- /dev/null
+++ b/arch/loongarch/include/asm/image.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch binary image header for EFI (PE/COFF) format.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#ifndef __ASSEMBLER__
+
+/**
+ * struct loongarch_image_header
+ *
+ * @dos_sig: Optional PE format 'MZ' signature.
+ * @padding_1: Reserved.
+ * @kernel_entry: Kernel image entry pointer.
+ * @kernel_asize: Estimated size of the kernel memory image, in LSB byte order.
+ * @text_offset: The image load offset in LSB byte order.
+ * @padding_2: Reserved.
+ * @pe_header: Optional offset to a PE format header.
+ **/
+
+struct loongarch_image_header {
+ uint8_t dos_sig[2];
+ uint16_t padding_1[3];
+ uint64_t kernel_entry;
+ uint64_t kernel_asize;
+ uint64_t text_offset;
+ uint32_t padding_2[7];
+ uint32_t pe_header;
+};
+
+/*
+ * loongarch_header_check_dos_sig - Helper to check the header
+ *
+ * Returns true (non-zero) if 'MZ' signature is found.
+ */
+
+static inline int loongarch_header_check_dos_sig(const struct loongarch_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (h->dos_sig[0] == 'M' && h->dos_sig[1] == 'Z');
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __ASM_IMAGE_H */
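A hedged sketch of how a kexec_file loader could consume this header; probe_loongarch_image() and its error handling are illustrative, not the actual kexec_efi_ops callback:

static int probe_loongarch_image(const char *kernel_buf, unsigned long len)
{
	const struct loongarch_image_header *h = (const void *)kernel_buf;

	if (!kernel_buf || len < sizeof(*h))
		return -EINVAL;

	if (!loongarch_header_check_dos_sig(h))
		return -EINVAL;	/* no 'MZ', so not a PE/COFF vmlinux.efi */

	/*
	 * The little-endian kernel_entry, kernel_asize and text_offset
	 * fields can now be trusted to place and enter the image.
	 */
	return 0;
}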
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index c3993fd88aba..55e64a12a124 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -77,6 +77,10 @@ enum reg2_op {
iocsrwrh_op = 0x19205,
iocsrwrw_op = 0x19206,
iocsrwrd_op = 0x19207,
+ llacqw_op = 0xe15e0,
+ screlw_op = 0xe15e1,
+ llacqd_op = 0xe15e2,
+ screld_op = 0xe15e3,
};
enum reg2i5_op {
@@ -189,6 +193,7 @@ enum reg3_op {
fldxd_op = 0x7068,
fstxs_op = 0x7070,
fstxd_op = 0x7078,
+ scq_op = 0x70ae,
amswapw_op = 0x70c0,
amswapd_op = 0x70c1,
amaddw_op = 0x70c2,
@@ -497,6 +502,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs);
int larch_insn_read(void *addr, u32 *insnp);
int larch_insn_write(void *addr, u32 insn);
int larch_insn_patch_text(void *addr, u32 insn);
+int larch_insn_text_copy(void *dst, void *src, size_t len);
u32 larch_insn_gen_nop(void);
u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);
@@ -510,6 +516,8 @@ u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj);
u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
static inline bool signed_imm_check(long val, unsigned int bit)
@@ -532,6 +540,9 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
DEF_EMIT_REG0I15_FORMAT(break, break_op)
+/* like emit_break(imm) but returns a constant expression */
+#define __emit_break(imm) ((u32)((imm) | (break_op << 15)))
+
#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
int offset) \
@@ -680,7 +691,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op)
DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
-DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
+
+static inline void emit_jirl(union loongarch_instruction *insn,
+ enum loongarch_gpr rd,
+ enum loongarch_gpr rj,
+ int offset)
+{
+ insn->reg2i16_format.opcode = jirl_op;
+ insn->reg2i16_format.immediate = offset;
+ insn->reg2i16_format.rd = rd;
+ insn->reg2i16_format.rj = rj;
+}
#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
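jirl is the one reg2i16 instruction whose assembly operand order puts rd first (jirl rd, rj, offs16), while the branches generated by DEF_EMIT_REG2I16_FORMAT take rj first, so the dedicated helper presumably exists to make the C argument order match the assembly syntax. A usage sketch (the immediate is the encoded 16-bit word offset, which the hardware shifts left by 2):

union loongarch_instruction insn;

/*
 * "jirl $zero, $ra, 0" is the canonical return: jump to $ra and
 * discard the link address by writing it to $zero.
 */
emit_jirl(&insn, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);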
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index c2f9979b2979..0130185e0349 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -14,7 +14,7 @@
#include <asm/pgtable-bits.h>
#include <asm/string.h>
-extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
+extern void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#define early_memremap early_ioremap
@@ -23,16 +23,25 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#ifdef CONFIG_ARCH_IOREMAP
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- if (prot_val & _CACHE_CC)
+ if (offset > TO_PHYS_MASK)
+ return NULL;
+
+ switch (pgprot_val(prot) & _CACHE_MASK) {
+ case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
- else
+ case _CACHE_SUC:
return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+ case _CACHE_WUC:
+ return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
+ default:
+ return NULL;
+ }
}
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
+ ioremap_prot((offset), (size), PAGE_KERNEL_SUC)
#define iounmap(addr) ((void)(addr))
@@ -49,23 +58,13 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), \
- pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
+ wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((offset), (size), PAGE_KERNEL)
#define mmiowb() wmb()
-/*
- * String version of I/O memory access operations.
- */
-extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
-extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
-extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
-#define memset_io(c, v, l) __memset_io((c), (v), (l))
-#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
-#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
-
#define __io_aw() mmiowb()
#ifdef CONFIG_KFENCE
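With CONFIG_ARCH_IOREMAP each cache mode is a fixed direct-mapped window, so creating a mapping is just base-address arithmetic and iounmap() degenerates to a no-op. A usage sketch, where dev_paddr, fb_paddr, buf_paddr and the sizes are placeholders:

void __iomem *regs = ioremap(dev_paddr, 0x1000);	 /* SUC: strongly uncached */
void __iomem *fb   = ioremap_wc(fb_paddr, fb_size);	 /* WUC when wc_enabled */
void __iomem *mem  = ioremap_cache(buf_paddr, buf_size); /* CC: cacheable */

/*
 * All three pointers are offsets into the CACHE/UNCACHE/WRITECOMBINE
 * windows; physical addresses above TO_PHYS_MASK now yield NULL.
 */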
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 480418bc5071..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -39,11 +39,22 @@ void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16
+/*
+ * 256 Vectors Mapping for AVECINTC:
+ *
+ * 0 - 15: Mapping classic IPs, e.g. IP0-12.
+ * 16 - 255: Mapping vectors for external IRQs.
+ */
+#define NR_VECTORS 256
+#define NR_LEGACY_VECTORS 16
+#define IRQ_MATRIX_BITS NR_VECTORS
+
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
-#define NR_IRQS (64 + (256 * MAX_IO_PICS))
+#define MAX_IO_PICS 8
+#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
int node;
@@ -54,6 +65,7 @@ extern struct acpi_vector_group pch_group[MAX_IO_PICS];
extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define CORES_PER_EIO_NODE 4
+#define CORES_PER_VEIO_NODE 256
#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */
@@ -65,7 +77,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
#define LOONGSON_CPU_IRQ_BASE 16
-#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
+#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15)
#define LOONGSON_PCH_IRQ_BASE 64
#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
@@ -88,20 +100,8 @@ struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic;
-int liointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lio_pic *acpi_liointc);
-int eiointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_eio_pic *acpi_eiointc);
-
-int htvec_acpi_init(struct irq_domain *parent,
- struct acpi_madt_ht_pic *acpi_htvec);
-int pch_lpc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lpc_pic *acpi_pchlpc);
-int pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi);
-int pch_pic_acpi_init(struct irq_domain *parent,
- struct acpi_madt_bio_pic *acpi_pchpic);
-int find_pch_pic(u32 gsi);
+void complete_irq_moving(void);
+
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc;
diff --git a/arch/loongarch/include/asm/irq_work.h b/arch/loongarch/include/asm/irq_work.h
new file mode 100644
index 000000000000..d63076e9160d
--- /dev/null
+++ b/arch/loongarch/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_IRQ_WORK_H
+#define _ASM_LOONGARCH_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+
+#endif /* _ASM_LOONGARCH_IRQ_WORK_H */
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
index 319a8c616f1f..620163628a7f 100644
--- a/arch/loongarch/include/asm/irqflags.h
+++ b/arch/loongarch/include/asm/irqflags.h
@@ -5,7 +5,7 @@
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <linux/stringify.h>
@@ -14,40 +14,48 @@
static inline void arch_local_irq_enable(void)
{
u32 flags = CSR_CRMD_IE;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
static inline void arch_local_irq_disable(void)
{
u32 flags = 0;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
static inline unsigned long arch_local_irq_save(void)
{
u32 flags = 0;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
return flags;
}
static inline void arch_local_irq_restore(unsigned long flags)
{
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
@@ -72,6 +80,6 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
-#endif /* #ifndef __ASSEMBLY__ */
+#endif /* #ifndef __ASSEMBLER__ */
#endif /* _ASM_IRQFLAGS_H */
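csrxchg writes only the CRMD bits selected by the mask register and hands back the previous value, which is what makes the save/restore pair nest correctly. The standard usage pattern, shown with the arch-level helpers for clarity:

unsigned long flags;

flags = arch_local_irq_save();	/* returns old CRMD.IE, then clears it */
/* ... IRQ-off critical section ... */
arch_local_irq_restore(flags);	/* puts CRMD.IE back to its saved state */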
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 29acfe3de3fa..4000c7603d8e 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -7,24 +7,28 @@
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 4
-#define JUMP_TABLE_ENTRY \
+/* This macro is also expanded on the Rust side. */
+#define JUMP_TABLE_ENTRY(key, label) \
".pushsection __jump_table, \"aw\" \n\t" \
".align 3 \n\t" \
- ".long 1b - ., %l[l_yes] - . \n\t" \
- ".quad %0 - . \n\t" \
+ ".long 1b - ., " label " - . \n\t" \
+ ".quad " key " - . \n\t" \
".popsection \n\t"
+#define ARCH_STATIC_BRANCH_ASM(key, label) \
+ "1: nop \n\t" \
+ JUMP_TABLE_ENTRY(key, label)
+
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm goto(
- "1: nop \n\t"
- JUMP_TABLE_ENTRY
+ ARCH_STATIC_BRANCH_ASM("%0", "%l[l_yes]")
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -37,7 +41,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
{
asm goto(
"1: b %l[l_yes] \n\t"
- JUMP_TABLE_ENTRY
+ JUMP_TABLE_ENTRY("%0", "%l[l_yes]")
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -46,5 +50,5 @@ l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_JUMP_LABEL_H */
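At a use site the two asm goto variants compile to a single nop (arch_static_branch) or an unconditional b (arch_static_branch_jump), each paired with a __jump_table entry so the instruction can be repatched later. A minimal consumer sketch; my_fast_path_key and take_fast_path() are hypothetical:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_fast_path_key);

void hot_path(void)
{
	/* One nop until the key is enabled; then patched into a 'b'. */
	if (static_branch_unlikely(&my_fast_path_key))
		take_fast_path();
}

/* Elsewhere, e.g. at init time: static_branch_enable(&my_fast_path_key); */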
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index cd6084f4e153..0e50e5b5e056 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <linux/mmzone.h>
@@ -16,7 +16,7 @@
#define XRANGE_SHIFT (48)
/* Valid address length */
-#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS)
/* Used for taking out the valid address */
#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
/* One segment whole address space size */
@@ -25,6 +25,7 @@
/* 64-bit segment value. */
#define XKPRANGE_UC_SEG (0x8000)
#define XKPRANGE_CC_SEG (0x9000)
+#define XKPRANGE_WC_SEG (0xa000)
#define XKVRANGE_VC_SEG (0xffff)
/* Cached */
@@ -41,23 +42,30 @@
#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+/* WriteCombine */
+#define XKPRANGE_WC_START WRITECOMBINE_BASE
+#define XKPRANGE_WC_SIZE XRANGE_SIZE
+#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
+
/* VMALLOC (Cached or UnCached) */
#define XKVRANGE_VC_START MODULES_VADDR
#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
-#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
/* KAsan shadow memory start right after vmalloc. */
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
-extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
#define kasan_mem_to_shadow kasan_mem_to_shadow
@@ -66,12 +74,6 @@ void *kasan_mem_to_shadow(const void *addr);
#define kasan_shadow_to_mem kasan_shadow_to_mem
const void *kasan_shadow_to_mem(const void *shadow_addr);
-#define kasan_arch_is_ready kasan_arch_is_ready
-static __always_inline bool kasan_arch_is_ready(void)
-{
- return !kasan_early_stage;
-}
-
#define addr_has_metadata addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
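The offsets above implement the usual KASAN arithmetic per DMW segment: one shadow byte covers one 8-byte granule, placed at that segment's shadow offset. A sketch of the mapping for the new write-combine window, mirroring what kasan_mem_to_shadow() does and shown only to make the constants concrete:

static void *wuc_mem_to_shadow(unsigned long addr)	/* illustrative only */
{
	unsigned long off = addr & XRANGE_SHADOW_MASK;	/* strip the 0xa000 segment */

	return (void *)(XKPRANGE_WC_SHADOW_OFFSET +
			(off >> KASAN_SHADOW_SCALE_SHIFT));
}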
diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h
index cf95cd3eb2de..209fa43222e1 100644
--- a/arch/loongarch/include/asm/kexec.h
+++ b/arch/loongarch/include/asm/kexec.h
@@ -41,6 +41,18 @@ struct kimage_arch {
unsigned long systable_ptr;
};
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops kexec_efi_ops;
+extern const struct kexec_file_ops kexec_elf_ops;
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
+extern int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr, unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len);
+#endif
+
typedef void (*do_kexec_t)(unsigned long efi_boot,
unsigned long cmdline_ptr,
unsigned long systable_ptr,
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
index 92636e82957c..da9e93024626 100644
--- a/arch/loongarch/include/asm/kfence.h
+++ b/arch/loongarch/include/asm/kfence.h
@@ -53,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *pte = virt_to_kpte(addr);
- if (WARN_ON(!pte) || pte_none(*pte))
+ if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
return false;
if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
else
- set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
preempt_disable();
local_flush_tlb_one(addr);
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
index 724ca8b7b401..4a76ce796f1f 100644
--- a/arch/loongarch/include/asm/kvm_csr.h
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
+ __v; \
})
#define gcsr_xchg(v, m, csr) \
@@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
+
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
@@ -208,4 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
csr->csrs[gid] |= val & _mask;
}
+#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
diff --git a/arch/loongarch/include/asm/kvm_eiointc.h b/arch/loongarch/include/asm/kvm_eiointc.h
new file mode 100644
index 000000000000..8b7a2fa3f7f8
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_eiointc.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_EIOINTC_H
+#define __ASM_KVM_EIOINTC_H
+
+#include <kvm/iodev.h>
+
+#define EIOINTC_IRQS 256
+#define EIOINTC_ROUTE_MAX_VCPUS 256
+#define EIOINTC_IRQS_U64_NUMS (EIOINTC_IRQS / 64)
+/* map to ipnum per 32 irqs */
+#define EIOINTC_IRQS_NODETYPE_COUNT 16
+
+#define EIOINTC_BASE 0x1400
+#define EIOINTC_SIZE 0x900
+
+#define EIOINTC_NODETYPE_START 0xa0
+#define EIOINTC_NODETYPE_END 0xbf
+#define EIOINTC_IPMAP_START 0xc0
+#define EIOINTC_IPMAP_END 0xc7
+#define EIOINTC_ENABLE_START 0x200
+#define EIOINTC_ENABLE_END 0x21f
+#define EIOINTC_BOUNCE_START 0x280
+#define EIOINTC_BOUNCE_END 0x29f
+#define EIOINTC_ISR_START 0x300
+#define EIOINTC_ISR_END 0x31f
+#define EIOINTC_COREISR_START 0x400
+#define EIOINTC_COREISR_END 0x41f
+#define EIOINTC_COREMAP_START 0x800
+#define EIOINTC_COREMAP_END 0x8ff
+
+#define EIOINTC_VIRT_BASE (0x40000000)
+#define EIOINTC_VIRT_SIZE (0x1000)
+
+#define EIOINTC_VIRT_FEATURES (0x0)
+#define EIOINTC_HAS_VIRT_EXTENSION (0)
+#define EIOINTC_HAS_ENABLE_OPTION (1)
+#define EIOINTC_HAS_INT_ENCODE (2)
+#define EIOINTC_HAS_CPU_ENCODE (3)
+#define EIOINTC_VIRT_HAS_FEATURES ((1U << EIOINTC_HAS_VIRT_EXTENSION) \
+ | (1U << EIOINTC_HAS_ENABLE_OPTION) \
+ | (1U << EIOINTC_HAS_INT_ENCODE) \
+ | (1U << EIOINTC_HAS_CPU_ENCODE))
+#define EIOINTC_VIRT_CONFIG (0x4)
+#define EIOINTC_ENABLE (1)
+#define EIOINTC_ENABLE_INT_ENCODE (2)
+#define EIOINTC_ENABLE_CPU_ENCODE (3)
+
+#define LOONGSON_IP_NUM 8
+
+struct loongarch_eiointc {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+ struct kvm_io_device device_vext;
+ uint32_t num_cpu;
+ uint32_t features;
+ uint32_t status;
+
+ /* hardware state */
+ u64 nodetype[EIOINTC_IRQS_NODETYPE_COUNT / 4];
+
+ /* one bit shows the state of one irq */
+ u64 bounce[EIOINTC_IRQS_U64_NUMS];
+ u64 isr[EIOINTC_IRQS_U64_NUMS];
+ u64 coreisr[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U64_NUMS];
+ u64 enable[EIOINTC_IRQS_U64_NUMS];
+
+ /* use one byte to config ipmap for 32 irqs at once */
+ u64 ipmap;
+ /* use one byte to config coremap for one irq */
+ u64 coremap[EIOINTC_IRQS / 8];
+
+ DECLARE_BITMAP(sw_coreisr[EIOINTC_ROUTE_MAX_VCPUS][LOONGSON_IP_NUM], EIOINTC_IRQS);
+ uint8_t sw_coremap[EIOINTC_IRQS];
+};
+
+int kvm_loongarch_register_eiointc_device(void);
+void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level);
+
+#endif /* __ASM_KVM_EIOINTC_H */
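The per-irq state arrays pack the 256 interrupt lines into four u64 words each; a hedged helper sketch showing the indexing convention (these helpers are not part of the header):

static inline bool eiointc_irq_raised(struct loongarch_eiointc *s, int irq)
{
	return !!(s->isr[irq / 64] & BIT_ULL(irq % 64));	/* word, then bit */
}

static inline u8 eiointc_route_cpu(struct loongarch_eiointc *s, int irq)
{
	return s->sw_coremap[irq];	/* destination vcpu for this line */
}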
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index c87b6ea0ec47..e4fe5b8e8149 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -12,30 +12,40 @@
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
+#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ipi.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_MAX_VCPUS 256
#define KVM_MAX_CPUCFG_REGS 21
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
+#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
+#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
+#define KVM_DIRTY_LOG_MANUAL_CAPS \
+ (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)
+
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
@@ -49,6 +59,12 @@ struct kvm_vcpu_stat {
u64 cpucfg_exits;
u64 signal_exits;
u64 hypercall_exits;
+ u64 ipi_read_exits;
+ u64 ipi_write_exits;
+ u64 eiointc_read_exits;
+ u64 eiointc_write_exits;
+ u64 pch_pic_read_exits;
+ u64 pch_pic_write_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
@@ -57,9 +73,13 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
+#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
+ /* Host PMU CSR */
+ u64 perf_ctrl[HOST_MAX_PMNUM];
+ u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
@@ -76,7 +96,7 @@ struct kvm_world_switch {
*
* For LOONGARCH_CSR_CPUID register, max CPUID size is 512
* For IPI hardware, max destination CPUID size is 1024
- * For extioi interrupt controller, max destination CPUID size is 256
+ * For eiointc interrupt controller, max destination CPUID size is 256
* For msgint interrupt controller, max supported CPUID size is 65536
*
* Currently max CPUID is defined as 256 for KVM hypervisor, in future
@@ -104,9 +124,16 @@ struct kvm_arch {
unsigned int root_level;
spinlock_t phyid_map_lock;
struct kvm_phyid_map *phyid_map;
+ /* Enabled PV features */
+ unsigned long pv_features;
+ /* Supported KVM features */
+ unsigned long kvm_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
+ struct loongarch_ipi *ipi;
+ struct loongarch_eiointc *eiointc;
+ struct loongarch_pch_pic *pch_pic;
};
#define CSR_MAX_NUMS 0x800
@@ -130,8 +157,16 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 3)
+#define KVM_LARCH_PMU (0x1 << 4)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
+
+#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
+#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
+ BIT(KVM_FEATURE_STEAL_TIME) | \
+ BIT(KVM_FEATURE_USER_HCALL) | \
+ BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
/*
@@ -144,6 +179,9 @@ struct kvm_vcpu_arch {
/* Pointers stored here for easy accessing from assembly code */
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ /* GPA (=HVA) of PGD for secondary mmu */
+ unsigned long kvm_pgd;
+
/* Host registers preserved across guest mode execution */
unsigned long host_sp;
unsigned long host_tp;
@@ -165,10 +203,14 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
+ /* Guest max PMU CSR id */
+ int max_pmu_csrid;
+
/* GPR used as IO source/target */
u32 io_gpr;
@@ -190,6 +232,7 @@ struct kvm_vcpu_arch {
/* vcpu's vpid */
u64 vpid;
+ gpa_t flush_gpa;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
@@ -199,8 +242,17 @@ struct kvm_vcpu_arch {
int last_sched_cpu;
/* mp state */
struct kvm_mp_state mp_state;
+ /* ipi state */
+ struct ipi_state ipi_state;
/* cpucfg */
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+
+ /* paravirt steal time */
+ struct {
+ u64 guest_addr;
+ u64 last_steal;
+ struct gfn_to_hva_cache cache;
+ } st;
};
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
@@ -228,13 +280,36 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
+}
+
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+ return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
+/* Check whether KVM supports this feature (VMM may disable it) */
+static inline bool kvm_vm_support(struct kvm_arch *arch, int feature)
+{
+ return !!(arch->kvm_features & BIT_ULL(feature));
+}
+
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
@@ -259,9 +334,7 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
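
A quick sketch of the CPUCFG6 decode that kvm_guest_has_pmu() and kvm_get_pmu_num() above rely on; the PMP/PMNUM field positions mirror the CPUCFG6_* additions later in this series, and the register value used here is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define CPUCFG6_PMP		(1u << 0)	/* BIT(0) */
#define CPUCFG6_PMNUM		(0xfu << 4)	/* GENMASK(7, 4) */
#define CPUCFG6_PMNUM_SHIFT	4

int main(void)
{
	uint32_t cpucfg6 = 0x75;	/* hypothetical: PMP set, PMNUM = 7 */

	if (cpucfg6 & CPUCFG6_PMP)
		printf("guest PMU present, PMNUM field = %u\n",
		       (unsigned)((cpucfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT));
	return 0;
}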
diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h
new file mode 100644
index 000000000000..060163dfb4a3
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_ipi.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_IPI_H
+#define __ASM_KVM_IPI_H
+
+#include <kvm/iodev.h>
+
+#define LARCH_INT_IPI 12
+
+struct loongarch_ipi {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+};
+
+struct ipi_state {
+ spinlock_t lock;
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[4];
+};
+
+#define IOCSR_IPI_BASE 0x1000
+#define IOCSR_IPI_SIZE 0x160
+
+#define IOCSR_IPI_STATUS 0x000
+#define IOCSR_IPI_EN 0x004
+#define IOCSR_IPI_SET 0x008
+#define IOCSR_IPI_CLEAR 0x00c
+#define IOCSR_IPI_BUF_20 0x020
+#define IOCSR_IPI_BUF_28 0x028
+#define IOCSR_IPI_BUF_30 0x030
+#define IOCSR_IPI_BUF_38 0x038
+#define IOCSR_IPI_SEND 0x040
+#define IOCSR_MAIL_SEND 0x048
+#define IOCSR_ANY_SEND 0x158
+
+int kvm_loongarch_register_ipi_device(void);
+
+#endif
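
The IOCSR offsets above define a 0x160-byte IPI window at 0x1000; a small sketch of the range check a dispatcher would do (illustrative only, not the in-kernel code):

#include <stdio.h>

#define IOCSR_IPI_BASE	0x1000
#define IOCSR_IPI_SIZE	0x160

static int in_ipi_window(unsigned int off)
{
	return off >= IOCSR_IPI_BASE && off < IOCSR_IPI_BASE + IOCSR_IPI_SIZE;
}

int main(void)
{
	/* 0x1008 (IPI_SET) is inside; 0x1200 is past the window end (0x1160) */
	printf("0x1008 -> %d, 0x1200 -> %d\n",
	       in_ipi_window(0x1008), in_ipi_window(0x1200));
	return 0;
}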
diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h
index 099bafc6f797..e36cc7e8ed20 100644
--- a/arch/loongarch/include/asm/kvm_mmu.h
+++ b/arch/loongarch/include/asm/kvm_mmu.h
@@ -16,6 +16,13 @@
*/
#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
+/*
+ * _PAGE_MODIFIED is a SW pte bit. On the host kernel it records whether
+ * a page has ever been written; on the secondary MMU it records the page
+ * writeable attribute, for fast-path handling.
+ */
+#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED
+
#define _KVM_FLUSH_PGTABLE 0x1
#define _KVM_HAS_PGMASK 0x2
#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
@@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
WRITE_ONCE(*ptep, val);
}
-static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
-static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
+static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
+static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
@@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
- return pte | _PAGE_DIRTY;
+ return pte | __WRITEABLE;
}
static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
- return pte & ~_PAGE_DIRTY;
+ return pte & ~__WRITEABLE;
}
static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
@@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
return pte & ~_PAGE_HUGE;
}
+static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
+{
+ return pte | KVM_PAGE_WRITEABLE;
+}
+
static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
return ctx->flag & _KVM_FLUSH_PGTABLE;
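
A user-space sketch of the fast path these helpers enable: a pte carrying the KVM_PAGE_WRITEABLE software bit can be dirtied without a full walk. The bit positions below are illustrative stand-ins for the kernel's _PAGE_* values, and __WRITEABLE is assumed to combine the write and dirty bits.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;

#define _PAGE_WRITE		(1ULL << 8)	/* illustrative */
#define _PAGE_DIRTY		(1ULL << 9)	/* illustrative */
#define __WRITEABLE		(_PAGE_WRITE | _PAGE_DIRTY)
#define KVM_PAGE_WRITEABLE	(1ULL << 12)	/* stands in for _PAGE_MODIFIED */

int main(void)
{
	kvm_pte_t pte = KVM_PAGE_WRITEABLE;	/* writeable per sw bit, still clean */

	if (pte & KVM_PAGE_WRITEABLE)		/* kvm_pte_writeable(): fast path */
		pte |= __WRITEABLE;		/* kvm_pte_mkdirty() */

	printf("pte = %#llx, dirty = %d\n", (unsigned long long)pte,
	       !!(pte & __WRITEABLE));
	return 0;
}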
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 4ba2312e5f8c..3e4b397f423f 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -2,6 +2,8 @@
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
+#include <uapi/asm/kvm_para.h>
+
/*
* Hypercall code field
*/
@@ -11,12 +13,16 @@
#define KVM_HCALL_CODE_SERVICE 0
#define KVM_HCALL_CODE_SWDBG 1
+#define KVM_HCALL_CODE_USER_SERVICE 2
#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI 1
+#define KVM_HCALL_FUNC_NOTIFY 2
#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
+#define KVM_HCALL_USER_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
+
/*
* LoongArch hypercall return code
*/
@@ -24,13 +30,23 @@
#define KVM_HCALL_INVALID_CODE -1UL
#define KVM_HCALL_INVALID_PARAMETER -2UL
+#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
+#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
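
The two masks above pack a 64-byte-aligned GPA into bits 63:6 with a valid flag in bit 0; a worked example of the encoding (the GPA is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define KVM_STEAL_PHYS_VALID	(1ULL << 0)	/* BIT_ULL(0) */
#define KVM_STEAL_PHYS_MASK	(~0ULL << 6)	/* GENMASK_ULL(63, 6) */

int main(void)
{
	uint64_t gpa = 0x9000040ULL;	/* hypothetical, 64-byte aligned */
	uint64_t reg = (gpa & KVM_STEAL_PHYS_MASK) | KVM_STEAL_PHYS_VALID;

	printf("reg = %#llx, valid = %d, gpa = %#llx\n",
	       (unsigned long long)reg, (int)(reg & KVM_STEAL_PHYS_VALID),
	       (unsigned long long)(reg & KVM_STEAL_PHYS_MASK));
	return 0;
}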
/*
* Hypercall interface for KVM hypervisor
*
* a0: function identifier
- * a1-a6: args
+ * a1-a5: args
* Return value will be placed in a0.
- * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
+ * Up to 5 arguments are passed in a1, a2, a3, a4, a5.
*/
static __always_inline long kvm_hypercall0(u64 fid)
{
@@ -143,10 +159,20 @@ static __always_inline long kvm_hypercall5(u64 fid,
return ret;
}
+#ifdef CONFIG_PARAVIRT
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+#else
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
+#endif
static inline unsigned int kvm_arch_para_hints(void)
{
diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h
new file mode 100644
index 000000000000..7f33a3039272
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_pch_pic.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_KVM_PCH_PIC_H
+#define __ASM_KVM_PCH_PIC_H
+
+#include <kvm/iodev.h>
+
+#define PCH_PIC_SIZE 0x3e8
+
+#define PCH_PIC_INT_ID_START 0x0
+#define PCH_PIC_INT_ID_END 0x7
+#define PCH_PIC_MASK_START 0x20
+#define PCH_PIC_MASK_END 0x27
+#define PCH_PIC_HTMSI_EN_START 0x40
+#define PCH_PIC_HTMSI_EN_END 0x47
+#define PCH_PIC_EDGE_START 0x60
+#define PCH_PIC_EDGE_END 0x67
+#define PCH_PIC_CLEAR_START 0x80
+#define PCH_PIC_CLEAR_END 0x87
+#define PCH_PIC_AUTO_CTRL0_START 0xc0
+#define PCH_PIC_AUTO_CTRL0_END 0xc7
+#define PCH_PIC_AUTO_CTRL1_START 0xe0
+#define PCH_PIC_AUTO_CTRL1_END 0xe7
+#define PCH_PIC_ROUTE_ENTRY_START 0x100
+#define PCH_PIC_ROUTE_ENTRY_END 0x13f
+#define PCH_PIC_HTMSI_VEC_START 0x200
+#define PCH_PIC_HTMSI_VEC_END 0x23f
+#define PCH_PIC_INT_IRR_START 0x380
+#define PCH_PIC_INT_IRR_END 0x38f
+#define PCH_PIC_INT_ISR_START 0x3a0
+#define PCH_PIC_INT_ISR_END 0x3af
+#define PCH_PIC_POLARITY_START 0x3e0
+#define PCH_PIC_POLARITY_END 0x3e7
+#define PCH_PIC_INT_ID_VAL 0x7UL
+#define PCH_PIC_INT_ID_VER 0x1UL
+
+union pch_pic_id {
+ struct {
+ uint8_t reserved_0[3];
+ uint8_t id;
+ uint8_t version;
+ uint8_t reserved_1;
+ uint8_t irq_num;
+ uint8_t reserved_2;
+ } desc;
+ uint64_t data;
+};
+
+struct loongarch_pch_pic {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct kvm_io_device device;
+ union pch_pic_id id;
+ uint64_t mask; /* 1:disable irq, 0:enable irq */
+ uint64_t htmsi_en; /* 1:msi */
+ uint64_t edge; /* 1:edge triggered, 0:level triggered */
+ uint64_t auto_ctrl0; /* only use default value 00b */
+ uint64_t auto_ctrl1; /* only use default value 00b */
+ uint64_t last_intirr; /* edge detection */
+ uint64_t irr; /* interrupt request register */
+ uint64_t isr; /* interrupt service register */
+ uint64_t polarity; /* 0: high level trigger, 1: low level trigger */
+ uint8_t route_entry[64]; /* default value 0, route to int0: eiointc */
+ uint8_t htmsi_vector[64]; /* irq route table for routing to eiointc */
+ uint64_t pch_pic_base;
+};
+
+int kvm_loongarch_register_pch_pic_device(void);
+void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level);
+void pch_msi_set_irq(struct kvm *kvm, int irq, int level);
+
+#endif /* __ASM_KVM_PCH_PIC_H */
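
How the INT_ID doubleword decodes through union pch_pic_id above, assuming a little-endian host (as LoongArch is); the irq count below is hypothetical:

#include <stdint.h>
#include <stdio.h>

union pch_pic_id {
	struct {
		uint8_t reserved_0[3];
		uint8_t id;
		uint8_t version;
		uint8_t reserved_1;
		uint8_t irq_num;
		uint8_t reserved_2;
	} desc;
	uint64_t data;
};

int main(void)
{
	union pch_pic_id u = { .data = 0 };

	u.desc.id = 0x7;	/* PCH_PIC_INT_ID_VAL */
	u.desc.version = 0x1;	/* PCH_PIC_INT_ID_VER */
	u.desc.irq_num = 63;	/* hypothetical */
	printf("INT_ID doubleword = %#llx\n", (unsigned long long)u.data);
	return 0;
}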
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 590a92cb5416..3784ab4ccdb5 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -15,6 +15,7 @@
#define CPU_PMU (_ULCAST_(1) << 10)
#define CPU_TIMER (_ULCAST_(1) << 11)
#define CPU_IPI (_ULCAST_(1) << 12)
+#define CPU_AVEC (_ULCAST_(1) << 14)
/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */
#define CPU_IP0 (_ULCAST_(1))
@@ -37,12 +38,13 @@
#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
typedef union loongarch_instruction larch_inst;
-typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_emu_idle(struct kvm_vcpu *vcpu);
int kvm_pending_timer(struct kvm_vcpu *vcpu);
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
@@ -75,8 +77,13 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+#endif
+
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
-void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
@@ -120,4 +127,14 @@ static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long v
vcpu->arch.gprs[num] = val;
}
+static inline bool kvm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
+static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
+{
+ return vcpu->kvm->arch.pv_features & BIT(feature);
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
index e671978bf552..38566574e562 100644
--- a/arch/loongarch/include/asm/lbt.h
+++ b/arch/loongarch/include/asm/lbt.h
@@ -12,9 +12,13 @@
#include <asm/loongarch.h>
#include <asm/processor.h>
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
static inline int is_lbt_enabled(void)
{
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index eb09adda54b7..58a4a3b6b035 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -9,15 +9,15 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <larchintrin.h>
/* CPUCFG */
#define read_cpucfg(reg) __cpucfg(reg)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* LoongArch Registers */
#define REG_ZERO 0x0
@@ -53,7 +53,7 @@
#define REG_S7 0x1e
#define REG_S8 0x1f
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/* Bit fields for CPUCFG registers */
#define LOONGARCH_CPUCFG0 0x0
@@ -62,6 +62,7 @@
#define LOONGARCH_CPUCFG1 0x1
#define CPUCFG1_ISGR32 BIT(0)
#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_ISA GENMASK(1, 0)
#define CPUCFG1_PAGING BIT(2)
#define CPUCFG1_IOCSR BIT(3)
#define CPUCFG1_PABITS GENMASK(11, 4)
@@ -107,6 +108,12 @@
#define CPUCFG3_SPW_HG_HF BIT(11)
#define CPUCFG3_RVA BIT(12)
#define CPUCFG3_RVAMAX GENMASK(16, 13)
+#define CPUCFG3_ALDORDER_CAP BIT(18) /* All address load ordered, capability */
+#define CPUCFG3_ASTORDER_CAP BIT(19) /* All address store ordered, capability */
+#define CPUCFG3_ALDORDER_STA BIT(20) /* All address load ordered, status */
+#define CPUCFG3_ASTORDER_STA BIT(21) /* All address store ordered, status */
+#define CPUCFG3_SLDORDER_CAP BIT(22) /* Same address load ordered, capability */
+#define CPUCFG3_SLDORDER_STA BIT(23) /* Same address load ordered, status */
#define LOONGARCH_CPUCFG4 0x4
#define CPUCFG4_CCFREQ GENMASK(31, 0)
@@ -119,7 +126,9 @@
#define CPUCFG6_PMP BIT(0)
#define CPUCFG6_PAMVER GENMASK(3, 1)
#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
+#define CPUCFG6_PMBITS_SHIFT 8
#define CPUCFG6_UPM BIT(14)
#define LOONGARCH_CPUCFG16 0x10
@@ -160,17 +169,10 @@
/*
* CPUCFG index area: 0x40000000 -- 0x400000ff
- * SW emulation for KVM hypervirsor
+ * SW emulation for KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
-#define CPUCFG_KVM_BASE 0x40000000
-#define CPUCFG_KVM_SIZE 0x100
-#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
-#define KVM_SIGNATURE "KVM\0"
-#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
-#define KVM_FEATURE_IPI BIT(1)
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* CSR */
#define csr_read32(reg) __csrrd_w(reg)
@@ -186,7 +188,7 @@
#define iocsr_write32(val, reg) __iocsrwr_w(val, reg)
#define iocsr_write64(val, reg) __iocsrwr_d(val, reg)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* CSR register number */
@@ -252,10 +254,10 @@
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
#define CSR_ESTAT_IS_SHIFT 0
-#define CSR_ESTAT_IS_WIDTH 14
-#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
-#define LOONGARCH_CSR_ERA 0x6 /* ERA */
+#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
@@ -410,8 +412,8 @@
/* Config CSR registers */
#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */
-#define CSR_CPUID_COREID_WIDTH 9
-#define CSR_CPUID_COREID _ULCAST_(0x1ff)
+#define CSR_CPUID_COREID_WIDTH 11
+#define CSR_CPUID_COREID _ULCAST_(0x7ff)
#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */
#define CSR_CONF1_VSMAX_SHIFT 12
@@ -450,6 +452,13 @@
#define LOONGARCH_CSR_KS6 0x36
#define LOONGARCH_CSR_KS7 0x37
#define LOONGARCH_CSR_KS8 0x38
+#define LOONGARCH_CSR_KS9 0x39
+#define LOONGARCH_CSR_KS10 0x3a
+#define LOONGARCH_CSR_KS11 0x3b
+#define LOONGARCH_CSR_KS12 0x3c
+#define LOONGARCH_CSR_KS13 0x3d
+#define LOONGARCH_CSR_KS14 0x3e
+#define LOONGARCH_CSR_KS15 0x3f
/* Exception allocated KS0, KS1 and KS2 statically */
#define EXCEPTION_KS0 LOONGARCH_CSR_KS0
@@ -471,7 +480,6 @@
#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */
#define CSR_TCFG_VAL_SHIFT 2
-#define CSR_TCFG_VAL_WIDTH 48
#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
#define CSR_TCFG_PERIOD_SHIFT 1
#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
@@ -503,6 +511,8 @@
#define CSR_GCFG_GPERF_SHIFT 24
#define CSR_GCFG_GPERF_WIDTH 3
#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GPMP_SHIFT 23
+#define CSR_GCFG_GPMP (_ULCAST_(0x1) << CSR_GCFG_GPMP_SHIFT)
#define CSR_GCFG_GCI_SHIFT 20
#define CSR_GCFG_GCI_WIDTH 2
#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)
@@ -571,6 +581,15 @@
/* Implement dependent */
#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */
+#define CSR_LDSTORDER_SHIFT 28
+#define CSR_LDSTORDER_WIDTH 3
+#define CSR_LDSTORDER_MASK (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT)
+#define CSR_LDSTORDER_NLD_NST (_ULCAST_(0x0) << CSR_LDSTORDER_SHIFT) /* 000 = No Load No Store */
+#define CSR_LDSTORDER_ALD_NST (_ULCAST_(0x1) << CSR_LDSTORDER_SHIFT) /* 001 = All Load No Store */
+#define CSR_LDSTORDER_SLD_NST (_ULCAST_(0x3) << CSR_LDSTORDER_SHIFT) /* 011 = Same Load No Store */
+#define CSR_LDSTORDER_NLD_AST (_ULCAST_(0x4) << CSR_LDSTORDER_SHIFT) /* 100 = No Load All Store */
+#define CSR_LDSTORDER_ALD_AST (_ULCAST_(0x5) << CSR_LDSTORDER_SHIFT) /* 101 = All Load All Store */
+#define CSR_LDSTORDER_SLD_AST (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT) /* 111 = Same Load All Store */
#define CSR_MISPEC_SHIFT 20
#define CSR_MISPEC_WIDTH 8
#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
@@ -648,6 +667,13 @@
#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+#define LOONGARCH_CSR_ISR0 0xa0
+#define LOONGARCH_CSR_ISR1 0xa1
+#define LOONGARCH_CSR_ISR2 0xa2
+#define LOONGARCH_CSR_ISR3 0xa3
+
+#define LOONGARCH_CSR_IRR 0xa4
+
#define LOONGARCH_CSR_PRID 0xc0
/* Shadow MCSR : 0xc0 ~ 0xff */
@@ -877,7 +903,7 @@
#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
-/* Direct Map window 0/1 */
+/* Direct Map window 0/1/2/3 */
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -889,6 +915,14 @@
#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+#define CSR_DMW2_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW2_MAT _CONST64_(2 << 4)
+#define CSR_DMW2_VSEG _CONST64_(0xa000)
+#define CSR_DMW2_BASE (CSR_DMW2_VSEG << DMW_PABITS)
+#define CSR_DMW2_INIT (CSR_DMW2_BASE | CSR_DMW2_MAT | CSR_DMW2_PLV0)
+
+#define CSR_DMW3_INIT 0x0
+
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -949,6 +983,36 @@
#define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */
#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */
+#define LOONGARCH_CSR_DB8ADDR 0x350 /* data breakpoint 8 address */
+#define LOONGARCH_CSR_DB8MASK 0x351 /* data breakpoint 8 mask */
+#define LOONGARCH_CSR_DB8CTRL 0x352 /* data breakpoint 8 control */
+#define LOONGARCH_CSR_DB8ASID 0x353 /* data breakpoint 8 asid */
+
+#define LOONGARCH_CSR_DB9ADDR 0x358 /* data breakpoint 9 address */
+#define LOONGARCH_CSR_DB9MASK 0x359 /* data breakpoint 9 mask */
+#define LOONGARCH_CSR_DB9CTRL 0x35a /* data breakpoint 9 control */
+#define LOONGARCH_CSR_DB9ASID 0x35b /* data breakpoint 9 asid */
+
+#define LOONGARCH_CSR_DB10ADDR 0x360 /* data breakpoint 10 address */
+#define LOONGARCH_CSR_DB10MASK 0x361 /* data breakpoint 10 mask */
+#define LOONGARCH_CSR_DB10CTRL 0x362 /* data breakpoint 10 control */
+#define LOONGARCH_CSR_DB10ASID 0x363 /* data breakpoint 10 asid */
+
+#define LOONGARCH_CSR_DB11ADDR 0x368 /* data breakpoint 11 address */
+#define LOONGARCH_CSR_DB11MASK 0x369 /* data breakpoint 11 mask */
+#define LOONGARCH_CSR_DB11CTRL 0x36a /* data breakpoint 11 control */
+#define LOONGARCH_CSR_DB11ASID 0x36b /* data breakpoint 11 asid */
+
+#define LOONGARCH_CSR_DB12ADDR 0x370 /* data breakpoint 12 address */
+#define LOONGARCH_CSR_DB12MASK 0x371 /* data breakpoint 12 mask */
+#define LOONGARCH_CSR_DB12CTRL 0x372 /* data breakpoint 12 control */
+#define LOONGARCH_CSR_DB12ASID 0x373 /* data breakpoint 12 asid */
+
+#define LOONGARCH_CSR_DB13ADDR 0x378 /* data breakpoint 13 address */
+#define LOONGARCH_CSR_DB13MASK 0x379 /* data breakpoint 13 mask */
+#define LOONGARCH_CSR_DB13CTRL 0x37a /* data breakpoint 13 control */
+#define LOONGARCH_CSR_DB13ASID 0x37b /* data breakpoint 13 asid */
+
#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
@@ -992,6 +1056,36 @@
#define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */
#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+#define LOONGARCH_CSR_IB8ADDR 0x3d0 /* inst breakpoint 8 address */
+#define LOONGARCH_CSR_IB8MASK 0x3d1 /* inst breakpoint 8 mask */
+#define LOONGARCH_CSR_IB8CTRL 0x3d2 /* inst breakpoint 8 control */
+#define LOONGARCH_CSR_IB8ASID 0x3d3 /* inst breakpoint 8 asid */
+
+#define LOONGARCH_CSR_IB9ADDR 0x3d8 /* inst breakpoint 9 address */
+#define LOONGARCH_CSR_IB9MASK 0x3d9 /* inst breakpoint 9 mask */
+#define LOONGARCH_CSR_IB9CTRL 0x3da /* inst breakpoint 9 control */
+#define LOONGARCH_CSR_IB9ASID 0x3db /* inst breakpoint 9 asid */
+
+#define LOONGARCH_CSR_IB10ADDR 0x3e0 /* inst breakpoint 10 address */
+#define LOONGARCH_CSR_IB10MASK 0x3e1 /* inst breakpoint 10 mask */
+#define LOONGARCH_CSR_IB10CTRL 0x3e2 /* inst breakpoint 10 control */
+#define LOONGARCH_CSR_IB10ASID 0x3e3 /* inst breakpoint 10 asid */
+
+#define LOONGARCH_CSR_IB11ADDR 0x3e8 /* inst breakpoint 11 address */
+#define LOONGARCH_CSR_IB11MASK 0x3e9 /* inst breakpoint 11 mask */
+#define LOONGARCH_CSR_IB11CTRL 0x3ea /* inst breakpoint 11 control */
+#define LOONGARCH_CSR_IB11ASID 0x3eb /* inst breakpoint 11 asid */
+
+#define LOONGARCH_CSR_IB12ADDR 0x3f0 /* inst breakpoint 12 address */
+#define LOONGARCH_CSR_IB12MASK 0x3f1 /* inst breakpoint 12 mask */
+#define LOONGARCH_CSR_IB12CTRL 0x3f2 /* inst breakpoint 12 control */
+#define LOONGARCH_CSR_IB12ASID 0x3f3 /* inst breakpoint 12 asid */
+
+#define LOONGARCH_CSR_IB13ADDR 0x3f8 /* inst breakpoint 13 address */
+#define LOONGARCH_CSR_IB13MASK 0x3f9 /* inst breakpoint 13 mask */
+#define LOONGARCH_CSR_IB13CTRL 0x3fa /* inst breakpoint 13 control */
+#define LOONGARCH_CSR_IB13ASID 0x3fb /* inst breakpoint 13 asid */
+
#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
#define LOONGARCH_CSR_DERA 0x501 /* debug era */
#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
@@ -1002,7 +1096,7 @@
/*
* CSR_ECFG IM
*/
-#define ECFG0_IM 0x00001fff
+#define ECFG0_IM 0x00005fff
#define ECFGB_SIP0 0
#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1 1
@@ -1045,6 +1139,8 @@
#define IOCSRF_EIODECODE BIT_ULL(9)
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
+#define IOCSRF_AVEC BIT_ULL(15)
+#define IOCSRF_REDIRECT BIT_ULL(16)
#define LOONGARCH_IOCSR_VENDOR 0x10
@@ -1053,11 +1149,15 @@
#define LOONGARCH_IOCSR_NODECNT 0x408
#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
+#define LOONGARCH_IOCSR_SMCMBX 0x51c
+
/* PerCore CSR, only accessible by local cores */
#define LOONGARCH_IOCSR_IPI_STATUS 0x1000
#define LOONGARCH_IOCSR_IPI_EN 0x1004
@@ -1106,7 +1206,7 @@
#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00
#define IOCSR_EXTIOI_VECTOR_NUM 256
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static __always_inline u64 drdtime(void)
{
@@ -1268,7 +1368,7 @@ __BUILD_CSR_OP(tlbidx)
#define clear_csr_estat(val) \
csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/* Generic EntryLo bit definitions */
#define ENTRYLO_V (_ULCAST_(1) << 0)
@@ -1375,9 +1475,10 @@ __BUILD_CSR_OP(tlbidx)
#define INT_TI 11 /* Timer */
#define INT_IPI 12
#define INT_NMI 13
+#define INT_AVEC 14
/* ExcCodes corresponding to interrupts */
-#define EXCCODE_INT_NUM (INT_NMI + 1)
+#define EXCCODE_INT_NUM (INT_AVEC + 1)
#define EXCCODE_INT_START 64
#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
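
A one-line check of the interrupt ExcCode arithmetic just above: INT_AVEC = 14 gives 15 interrupt ExcCodes spanning 64..78.

#include <stdio.h>

#define INT_AVEC		14
#define EXCCODE_INT_NUM		(INT_AVEC + 1)
#define EXCCODE_INT_START	64
#define EXCCODE_INT_END		(EXCCODE_INT_START + EXCCODE_INT_NUM - 1)

int main(void)
{
	printf("%d interrupt ExcCodes: %d..%d\n",
	       EXCCODE_INT_NUM, EXCCODE_INT_START, EXCCODE_INT_END);
	return 0;
}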
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
index 9f97c3453b9c..304363bd3935 100644
--- a/arch/loongarch/include/asm/mmu_context.h
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/* Normal, classic get_new_mmu_context */
static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
{
u64 asid = asid_cache(cpu);
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
- local_flush_tlb_user(); /* start new asid cycle */
+ *need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
+{
+ __asm__ __volatile__(
+ "csrwr %[pgdl_val], %[pgdl_reg] \n\t"
+ "csrwr %[asid_val], %[asid_reg] \n\t"
+ : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
+ : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
+ : "memory"
+ );
+}
+
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ bool need_flush = false;
unsigned int cpu = smp_processor_id();
/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
- get_new_mmu_context(next, cpu);
-
- write_csr_asid(cpu_asid(cpu, next));
+ get_new_mmu_context(next, cpu, &need_flush);
if (next != &init_mm)
- csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
else
- csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
+
+ if (need_flush)
+		local_flush_tlb_user(); /* Flush tlb after updating the ASID */
/*
* Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
if (asid == cpu_asid(cpu, mm)) {
+ bool need_flush = false;
+
if (!current->mm || (current->mm == mm)) {
- get_new_mmu_context(mm, cpu);
+ get_new_mmu_context(mm, cpu, &need_flush);
+
write_csr_asid(cpu_asid(cpu, mm));
+ if (need_flush)
+			local_flush_tlb_user(); /* Flush tlb after updating the ASID */
+
goto out;
}
}
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
deleted file mode 100644
index 2b9a90727e19..000000000000
--- a/arch/loongarch/include/asm/mmzone.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Author: Huacai Chen (chenhuacai@loongson.cn)
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/page.h>
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index b5f9de9f102e..bbf9f70bd25f 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -22,20 +22,6 @@ extern int numa_off;
extern s16 __cpuid_to_node[CONFIG_NR_CPUS];
extern nodemask_t numa_nodes_parsed __initdata;
-struct numa_memblk {
- u64 start;
- u64 end;
- int nid;
-};
-
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-struct numa_meminfo {
- int nr_blks;
- struct numa_memblk blk[NR_NODE_MEMBLKS];
-};
-
-extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-
extern void __init early_numa_add_cpu(int cpuid, s16 node);
extern void numa_add_cpu(unsigned int cpu);
extern void numa_remove_cpu(unsigned int cpu);
diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h
index caf1f71a1057..d5fa98d1d177 100644
--- a/arch/loongarch/include/asm/orc_types.h
+++ b/arch/loongarch/include/asm/orc_types.h
@@ -34,7 +34,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -53,6 +53,6 @@ struct orc_entry {
unsigned int type:3;
unsigned int signal:1;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index e85df33f11c7..a3aaf34fba16 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -8,19 +8,14 @@
#include <linux/const.h>
#include <asm/addrspace.h>
-/*
- * PAGE_SHIFT determines the page size
- */
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#include <vdso/page.h>
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <linux/pfn.h>
@@ -81,9 +76,6 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
#define pfn_to_phys(pfn) __pfn_to_phys(pfn)
#define phys_to_pfn(paddr) __phys_to_pfn(paddr)
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
-#define phys_to_page(paddr) pfn_to_page(phys_to_pfn(paddr))
-
#ifndef CONFIG_KFENCE
#define page_to_virt(page) __va(page_to_phys(page))
@@ -113,14 +105,11 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PAGE_H */
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index 0965710f47f2..3f4323603e6a 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -18,6 +18,8 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_ipi_init(void);
+int __init pv_time_init(void);
+int __init pv_spinlock_init(void);
#else
@@ -26,5 +28,15 @@ static inline int pv_ipi_init(void)
return 0;
}
+static inline int pv_time_init(void)
+{
+ return 0;
+}
+
+static inline int pv_spinlock_init(void)
+{
+ return 0;
+}
+
#endif // CONFIG_PARAVIRT
#endif
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 8f290e5546cf..87be9b14e9da 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -68,75 +68,6 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
-{
- unsigned long ret;
-
- switch (size) {
- case 1:
- __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
-}
-
-static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- default:
- BUILD_BUG();
- }
-}
-
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
@@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return 0;
}
+#define __pcpu_op_1(op) op ".b "
+#define __pcpu_op_2(op) op ".h "
+#define __pcpu_op_4(op) op ".w "
+#define __pcpu_op_8(op) op ".d "
+
+#define _percpu_read(size, _pcp) \
+({ \
+ typeof(_pcp) __pcp_ret; \
+ \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \
+ : [ret] "=&r"(__pcp_ret) \
+ : [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+ \
+ __pcp_ret; \
+})
+
+#define _percpu_write(size, _pcp, _val) \
+do { \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \
+ : \
+ : [val] "r"(_val), [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+} while (0)
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
__ret; \
})
-#define _percpu_read(pcp) \
-({ \
- typeof(pcp) __retval; \
- __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
- __retval; \
-})
-
-#define _percpu_write(pcp, val) \
-do { \
- __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
-} while (0) \
-
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
@@ -215,15 +161,15 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_read_1(pcp) _percpu_read(pcp)
-#define this_cpu_read_2(pcp) _percpu_read(pcp)
-#define this_cpu_read_4(pcp) _percpu_read(pcp)
-#define this_cpu_read_8(pcp) _percpu_read(pcp)
+#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
+#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
+#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
+#define this_cpu_read_8(pcp) _percpu_read(8, pcp)
-#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
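
The size argument now selects the instruction suffix at preprocessing time via token pasting; a standalone demo of the expansion (it only stringizes the pasted mnemonic, it does not execute LoongArch code):

#include <stdio.h>

#define __pcpu_op_1(op)	op ".b "
#define __pcpu_op_2(op)	op ".h "
#define __pcpu_op_4(op)	op ".w "
#define __pcpu_op_8(op)	op ".d "

#define PCPU_INSN(size, op)	__pcpu_op_##size(op)

int main(void)
{
	/* _percpu_read(4, pcp) emits "ldx.w %[ret], $r21, %[ptr]" */
	printf("size-4 read uses:  \"%s\"\n", PCPU_INSN(4, "ldx"));
	printf("size-8 write uses: \"%s\"\n", PCPU_INSN(8, "stx"));
	return 0;
}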
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 4e2d6b7ca2ee..08dcc698ec18 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -10,6 +10,7 @@
#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -44,11 +45,18 @@ extern void pagetable_init(void);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte = __pte_alloc_one_kernel(mm);
+
+ if (pte)
+ kernel_pte_init(pte);
+
+ return pte;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -61,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
@@ -80,7 +88,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (!ptdesc)
return NULL;
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 21319c1e045c..2fc3789220ac 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -90,10 +90,17 @@
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)
+#define pgprot_nx pgprot_nx
+
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) | _PAGE_NO_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
@@ -118,6 +125,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index af3acdf3481a..03fb60432fde 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -55,7 +55,7 @@
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/mm_types.h>
#include <linux/mmzone.h>
@@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#define ptep_get(ptep) READ_ONCE(*(ptep))
+#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
+
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
-static inline void p4d_clear(p4d_t *p4dp)
-{
- p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
-}
-
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)p4d_val(p4d);
@@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
- *p4d = p4dval;
+ WRITE_ONCE(*p4d, p4dval);
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
@@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
return pud_val(pud) != (unsigned long)invalid_pmd_table;
}
-static inline void pud_clear(pud_t *pudp)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
+ return (pmd_t *)pud_val(pud);
}
-static inline pmd_t *pud_pgtable(pud_t pud)
+static inline void set_pud(pud_t *pud, pud_t pudval)
{
- return (pmd_t *)pud_val(pud);
+ WRITE_ONCE(*pud, pudval);
}
-#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
+}
#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
-static inline void pmd_clear(pmd_t *pmdp)
+static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
- pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+ WRITE_ONCE(*pmd, pmdval);
}
-#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
+}
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
@@ -246,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -259,7 +267,11 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm
*/
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
+#define pud_init pud_init
extern void pmd_init(void *addr);
+#define pmd_init pmd_init
+extern void kernel_pte_init(void *addr);
+#define kernel_pte_init kernel_pte_init
/*
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
@@ -289,7 +301,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
@@ -314,46 +326,19 @@ extern void paging_init(void);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- *ptep = pteval;
- if (pte_val(pteval) & _PAGE_GLOBAL) {
- pte_t *buddy = ptep_buddy(ptep);
- /*
- * Make sure the buddy is global too (if it's !none,
- * it better already be global)
- */
+ WRITE_ONCE(*ptep, pteval);
+
#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- __WEAK_LLSC_MB
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
-#else /* !CONFIG_SMP */
- if (pte_none(*buddy))
- pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-#endif /* CONFIG_SMP */
- }
+ if (pte_val(pteval) & _PAGE_GLOBAL)
+ DBAR(0b11000); /* o_wrw = 0b11000 */
+#endif
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- /* Preserve global status for the pair */
- if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
- set_pte(ptep, __pte(_PAGE_GLOBAL));
- else
- set_pte(ptep, __pte(0));
+ pte_t pte = ptep_get(ptep);
+ pte_val(pte) &= _PAGE_GLOBAL;
+ set_pte(ptep, pte);
}
#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
@@ -437,14 +422,11 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
return false;
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
+ if (pte_val(pte) & _PAGE_DIRTY)
+ pte_val(pte) |= _PAGE_MODIFIED;
+
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
@@ -467,8 +449,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -568,9 +550,11 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
- (pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
- return pmd;
+ if (pmd_val(pmd) & _PAGE_DIRTY)
+ pmd_val(pmd) |= _PAGE_MODIFIED;
+
+ return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
@@ -589,7 +573,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- pmd_t old = *pmdp;
+ pmd_t old = pmdp_get(pmdp);
pmd_clear(pmdp);
@@ -620,6 +604,6 @@ static inline long pmd_protnone(pmd_t pmd)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_H */
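
The rewritten pte_clear() above keeps only _PAGE_GLOBAL from the old entry instead of the old buddy-preserving dance; a sketch of that masking (bit position illustrative):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_GLOBAL	(1ULL << 6)	/* illustrative */

int main(void)
{
	uint64_t pte = _PAGE_GLOBAL | 0xf000ULL | 0x3ULL;	/* global + pfn/flags */

	pte &= _PAGE_GLOBAL;	/* pte_clear(): preserve only the global bit */
	printf("cleared pte = %#llx\n", (unsigned long long)pte);
	return 0;
}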
diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h
index 1672262a5e2e..0b168cdaae9a 100644
--- a/arch/loongarch/include/asm/prefetch.h
+++ b/arch/loongarch/include/asm/prefetch.h
@@ -8,7 +8,7 @@
#define Pref_Load 0
#define Pref_Store 8
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro __pref hint addr
#ifdef CONFIG_CPU_HAS_PREFETCH
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index f3ddaed9ef7f..e5d21e836d99 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
- return arch_irqs_disabled_flags(regs->csr_prmd);
+ return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..e76d3aa1e1eb
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <linux/jump_label.h>
+
+#ifdef CONFIG_PARAVIRT
+
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ int val;
+
+ if (!static_branch_unlikely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+__retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
+
+ return true;
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
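
A user-space rendering of the test-and-set fallback above, using C11 atomics in place of the kernel's atomic_try_cmpxchg(); same loop structure, different primitives:

#include <stdatomic.h>
#include <stdio.h>

#define _Q_LOCKED_VAL	1

static atomic_int lock_val;

static void tas_lock(void)
{
	int val;

	for (;;) {
		val = atomic_load(&lock_val);
		if (!val && atomic_compare_exchange_strong(&lock_val, &val,
							   _Q_LOCKED_VAL))
			return;
		/* cpu_relax() stand-in: just spin */
	}
}

int main(void)
{
	tas_lock();
	printf("lock taken: %d\n", atomic_load(&lock_val));
	return 0;
}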
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
new file mode 100644
index 000000000000..55dfaefd02c8
--- /dev/null
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_SET_MEMORY_H
+#define _ASM_LOONGARCH_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+
+bool kernel_page_present(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+
+#endif /* _ASM_LOONGARCH_SET_MEMORY_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index ee52fb1e9963..3c2fb16b11b6 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -34,6 +34,11 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
+#ifdef CONFIG_RELR
+extern long __relr_dyn_begin;
+extern long __relr_dyn_end;
+#endif
+
extern unsigned long __init relocate_kernel(void);
#endif
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 278700cfee88..3a47f52959a8 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -25,6 +25,7 @@ extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_llc_shared_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
@@ -38,7 +39,7 @@ int loongson_cpu_disable(void);
void loongson_cpu_die(unsigned int cpu);
#endif
-static inline void plat_smp_setup(void)
+static inline void __init plat_smp_setup(void)
{
loongson_smp_setup();
}
@@ -69,9 +70,15 @@ extern int __cpu_logical_map[NR_CPUS];
#define ACTION_BOOT_CPU 0
#define ACTION_RESCHEDULE 1
#define ACTION_CALL_FUNCTION 2
+#define ACTION_IRQ_WORK 3
+#define ACTION_CLEAR_VECTOR 4
#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
+#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
+#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
+
+struct seq_file;
struct secondary_data {
unsigned long stack;
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
index 8d4af6aff8a8..4501efac1a87 100644
--- a/arch/loongarch/include/asm/sparsemem.h
+++ b/arch/loongarch/include/asm/sparsemem.h
@@ -21,11 +21,6 @@
#define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. */
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
-int memory_add_physaddr_to_nid(u64 addr);
-#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-#endif
-
#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS)
#endif /* _LOONGARCH_SPARSEMEM_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index d9eafd3ee3d1..5cb568a60cf8 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -38,6 +38,17 @@
cfi_restore \reg \offset \docfi
.endm
+ .macro SETUP_DMWINS temp
+ li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN0
+ li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN1
+ li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN2
+ li.d \temp, CSR_DMW3_INIT # 0x0, unused
+ csrwr \temp, LOONGARCH_CSR_DMWIN3
+ .endm
+
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
@@ -46,6 +57,12 @@
jirl zero, \temp1, 0xc
.endm
+ .macro STACKLEAK_ERASE
+#ifdef CONFIG_KSTACK_ERASE
+ bl stackleak_erase_on_task_stack
+#endif
+ .endm
+
.macro BACKUP_T0T1
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
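
A worked value for the DMW2 window programmed by SETUP_DMWINS above, assuming DMW_PABITS = 48 (the constant is defined elsewhere in these headers): base 0xa000 << 48 plus MAT = 2 (WUC) and PLV0 gives 0xa000000000000021.

#include <stdint.h>
#include <stdio.h>

#define DMW_PABITS	48	/* assumed */

int main(void)
{
	uint64_t base = 0xa000ULL << DMW_PABITS;
	uint64_t init = base | (2 << 4) | 1;	/* CSR_DMW2_MAT | CSR_DMW2_PLV0 */

	printf("CSR_DMW2_INIT = %#llx\n", (unsigned long long)init);
	return 0;
}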
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..5c8be156567c 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -30,6 +31,11 @@ bool in_irq_stack(unsigned long stack, struct stack_info *info);
bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info);
int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info);
+static __always_inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
#define STR_LONG_L __stringify(LONG_L)
#define STR_LONG_S __stringify(LONG_S)
#define STR_LONGSIZE __stringify(LONGSIZE)
@@ -43,6 +49,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +87,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
index e286dc58476e..81d2733f7b94 100644
--- a/arch/loongarch/include/asm/syscall.h
+++ b/arch/loongarch/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->regs[11];
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->regs[11] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ memcpy(&regs->regs[5], &args[1], 5 * sizeof(long));
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_LOONGARCH64;
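
The new syscall_set_nr()/syscall_set_arguments() mirror the existing getters: the syscall number lives in $a7 (regs[11]), argument 0 is shadowed in orig_a0 so that syscall restart can recover it after the return value overwrites $a0, and arguments 1-5 sit in $a1-$a5 (regs[5]..regs[9]). A hedged, non-kernel sketch of that register mapping (struct and helper names are illustrative):

struct pt_regs_sketch {
	unsigned long regs[32];		/* $r0..$r31; a0 = $r4, a7 = $r11 */
	unsigned long orig_a0;
};

static void set_syscall(struct pt_regs_sketch *regs, int nr,
			const unsigned long args[6])
{
	int i;

	regs->regs[11] = nr;		/* $a7: syscall number */
	regs->orig_a0 = args[0];	/* $a0 shadowed for restart */
	for (i = 1; i < 6; i++)
		regs->regs[4 + i] = args[i];	/* $a1..$a5 */
}
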
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 8bf0e6f51546..4d7117fcdc78 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -10,7 +10,7 @@
#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
@@ -53,7 +53,7 @@ static inline struct thread_info *current_thread_info(void)
register unsigned long current_stack_pointer __asm__("$sp");
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* thread information allocation */
#define THREAD_SIZE SZ_16K
@@ -65,48 +65,42 @@ register unsigned long current_stack_pointer __asm__("$sp");
* access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
+ *
+ * Tell the generic TIF infrastructure which special bits LoongArch supports
*/
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
-#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
-#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_NOHZ 6 /* in adaptive nohz mode */
-#define TIF_UPROBE 7 /* breakpointed or singlestepping */
-#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */
-#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */
-#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
-#define TIF_FIXADE 11 /* Fix address errors in software */
-#define TIF_LOGADE 12 /* Log address errors to syslog */
-#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */
-#define TIF_32BIT_ADDR 14 /* 32-bit address space */
-#define TIF_LOAD_WATCH 15 /* If set, load watch registers */
-#define TIF_SINGLESTEP 16 /* Single Step */
-#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */
-#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
-#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
-#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
-#define TIF_PATCH_PENDING 21 /* pending live patching update */
+#define HAVE_TIF_NEED_RESCHED_LAZY
+#define HAVE_TIF_RESTORE_SIGMASK
+
+#include <asm-generic/thread_info_tif.h>
+
+/* Architecture specific bits */
+#define TIF_NOHZ 16 /* in adaptive nohz mode */
+#define TIF_USEDFPU 17 /* FPU was used by this task this quantum (SMP) */
+#define TIF_USEDSIMD 18 /* SIMD has been used this quantum */
+#define TIF_FIXADE 19 /* Fix address errors in software */
+#define TIF_LOGADE 20 /* Log address errors to syslog */
+#define TIF_32BIT_REGS 21 /* 32-bit general purpose registers */
+#define TIF_32BIT_ADDR 22 /* 32-bit address space */
+#define TIF_LOAD_WATCH 23 /* If set, load watch registers */
+#define TIF_SINGLESTEP 24 /* Single Step */
+#define TIF_LSX_CTX_LIVE 25 /* LSX context must be preserved */
+#define TIF_LASX_CTX_LIVE 26 /* LASX context must be preserved */
+#define TIF_USEDLBT 27 /* LBT was used by this task this quantum (SMP) */
+#define TIF_LBT_CTX_LIVE 28 /* LBT context must be preserved */
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
-#define _TIF_NOHZ (1<<TIF_NOHZ)
-#define _TIF_UPROBE (1<<TIF_UPROBE)
-#define _TIF_USEDFPU (1<<TIF_USEDFPU)
-#define _TIF_USEDSIMD (1<<TIF_USEDSIMD)
-#define _TIF_FIXADE (1<<TIF_FIXADE)
-#define _TIF_LOGADE (1<<TIF_LOGADE)
-#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
-#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
-#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE)
-#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
-#define _TIF_USEDLBT (1<<TIF_USEDLBT)
-#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
-#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
+#define _TIF_NOHZ BIT(TIF_NOHZ)
+#define _TIF_USEDFPU BIT(TIF_USEDFPU)
+#define _TIF_USEDSIMD BIT(TIF_USEDSIMD)
+#define _TIF_FIXADE BIT(TIF_FIXADE)
+#define _TIF_LOGADE BIT(TIF_LOGADE)
+#define _TIF_32BIT_REGS BIT(TIF_32BIT_REGS)
+#define _TIF_32BIT_ADDR BIT(TIF_32BIT_ADDR)
+#define _TIF_LOAD_WATCH BIT(TIF_LOAD_WATCH)
+#define _TIF_SINGLESTEP BIT(TIF_SINGLESTEP)
+#define _TIF_LSX_CTX_LIVE BIT(TIF_LSX_CTX_LIVE)
+#define _TIF_LASX_CTX_LIVE BIT(TIF_LASX_CTX_LIVE)
+#define _TIF_USEDLBT BIT(TIF_USEDLBT)
+#define _TIF_LBT_CTX_LIVE BIT(TIF_LBT_CTX_LIVE)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
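
With the common low bits now coming from <asm-generic/thread_info_tif.h>, the arch-specific flags start at bit 16 and their masks use BIT() instead of open-coded shifts. A trivial standalone illustration of the flag arithmetic (values copied from the header above):

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define TIF_USEDFPU	17		/* as defined above */
#define _TIF_USEDFPU	BIT(TIF_USEDFPU)

int main(void)
{
	unsigned long ti_flags = _TIF_USEDFPU;

	printf("FPU used this quantum: %s\n",
	       (ti_flags & _TIF_USEDFPU) ? "yes" : "no");
	return 0;
}
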
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 66128dec0bf6..f06e7ff25bb7 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
extern cpumask_t cpus_on_node[];
@@ -18,17 +19,22 @@ extern int pcibus_to_node(struct pci_bus *);
#define cpumask_of_pcibus(bus) (cpu_online_mask)
-extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-void numa_set_distance(int from, int to, int distance);
-
-#define node_distance(from, to) (node_distances[(from)][(to)])
+int __node_distance(int from, int to);
+#define node_distance(from, to) __node_distance(from, to)
#else
#define pcibus_to_node(bus) 0
#endif
#ifdef CONFIG_SMP
+/*
+ * Return the cpus that share the last level cache.
+ */
+static inline const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return &cpu_llc_shared_map[cpu];
+}
+
#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
#define topology_core_id(cpu) (cpu_data[cpu].core)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h
index baf15a0dcf8b..0edd731f3d6a 100644
--- a/arch/loongarch/include/asm/types.h
+++ b/arch/loongarch/include/asm/types.h
@@ -8,7 +8,7 @@
#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ULCAST_
#define _U64CAST_
#else
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
index cfddb0116a8c..e2c0f3d86c7b 100644
--- a/arch/loongarch/include/asm/unistd.h
+++ b/arch/loongarch/include/asm/unistd.h
@@ -8,4 +8,7 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..16c7f7e465a0 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -5,7 +5,7 @@
#include <linux/objtool.h>
#include <asm/orc_types.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
index c8f59983f702..025fc3f0a102 100644
--- a/arch/loongarch/include/asm/uprobes.h
+++ b/arch/loongarch/include/asm/uprobes.h
@@ -9,13 +9,12 @@ typedef u32 uprobe_opcode_t;
#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
-#define UPROBE_SWBP_INSN larch_insn_gen_break(BRK_UPROBE_BP)
+#define UPROBE_SWBP_INSN __emit_break(BRK_UPROBE_BP)
#define UPROBE_SWBP_INSN_SIZE LOONGARCH_INSN_SIZE
-#define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP)
+#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
- unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
index d3ba35eb23e7..f72ec79e2dde 100644
--- a/arch/loongarch/include/asm/vdso.h
+++ b/arch/loongarch/include/asm/vdso.h
@@ -31,7 +31,6 @@ struct loongarch_vdso_info {
unsigned long size;
unsigned long offset_sigreturn;
struct vm_special_mapping code_mapping;
- struct vm_special_mapping data_mapping;
};
extern struct loongarch_vdso_info vdso_info;
diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h
new file mode 100644
index 000000000000..395ec223bcbe
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/arch_data.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _VDSO_ARCH_DATA_H
+#define _VDSO_ARCH_DATA_H
+
+#ifndef __ASSEMBLER__
+
+#include <asm/asm.h>
+#include <asm/vdso.h>
+
+struct vdso_pcpu_data {
+ u32 node;
+} ____cacheline_aligned_in_smp;
+
+struct vdso_arch_data {
+ struct vdso_pcpu_data pdata[NR_CPUS];
+};
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..2ff05003c6e7
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLER__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
+{
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_getrandom;
+ register void *buffer asm("a0") = _buffer;
+ register size_t len asm("a1") = _len;
+ register unsigned int flags asm("a2") = _flags;
+
+ asm volatile(
+ " syscall 0\n"
+ : "=r" (ret)
+ : "r" (nr), "r" (buffer), "r" (len), "r" (flags)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+
+ return ret;
+}
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
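
getrandom_syscall() spells out the LoongArch syscall ABI: the number goes in $a7, the three arguments in $a0-$a2, `syscall 0` traps into the kernel, and the temporaries $t0-$t8 are clobbered. A hedged userspace analogue that reaches the same system call through libc's generic wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned char buf[16];
	long n = syscall(SYS_getrandom, buf, sizeof(buf), 0);

	printf("getrandom returned %ld\n", n);
	return n == (long)sizeof(buf) ? 0 : 1;
}
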
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index 89e6b222c2f2..dcafabca9bb6 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -7,7 +7,7 @@
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
@@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (tv), "r" (tz)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (clkid), "r" (ts)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (clkid), "r" (ts)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -72,7 +72,7 @@ static __always_inline int clock_getres_fallback(
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
uint64_t count;
@@ -89,18 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return (const struct vdso_data *)get_vdso_data();
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return (const struct vdso_data *)(get_vdso_data() + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE);
-}
-#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h
index ef5770b343a0..1e255373b0b8 100644
--- a/arch/loongarch/include/asm/vdso/processor.h
+++ b/arch/loongarch/include/asm/vdso/processor.h
@@ -5,10 +5,10 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define cpu_relax() barrier()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 5a12309d9fb5..04bd2d452876 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -4,62 +4,18 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
-#ifndef __ASSEMBLY__
+#ifndef _ASM_VDSO_VDSO_H
+#define _ASM_VDSO_VDSO_H
+
+#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
-struct vdso_pcpu_data {
- u32 node;
-} ____cacheline_aligned_in_smp;
-
-struct loongarch_vdso_data {
- struct vdso_pcpu_data pdata[NR_CPUS];
-};
-
-/*
- * The layout of vvar:
- *
- * high
- * +---------------------+--------------------------+
- * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE |
- * +---------------------+--------------------------+
- * | time-ns vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * | generic vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * low
- */
-#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
-#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT)
-
-enum vvar_pages {
- VVAR_GENERIC_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_LOONGARCH_PAGES_START,
- VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1,
- VVAR_NR_PAGES,
-};
-
-#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-
-static inline unsigned long get_vdso_base(void)
-{
- unsigned long addr;
-
- __asm__(
- " la.pcrel %0, _start\n"
- : "=r" (addr)
- :
- :);
-
- return addr;
-}
+#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
-static inline unsigned long get_vdso_data(void)
-{
- return get_vdso_base() - VVAR_SIZE;
-}
+#endif /* __ASSEMBLER__ */
-#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 5de615383a22..558eb9dfda52 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -2,26 +2,13 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-static __always_inline
-struct vdso_data *__loongarch_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 4aa680ca2e5f..517761419999 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += kvm_para.h
+syscall-y += unistd_64.h
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
index 6955a7cb2c65..2b34e56cfa9e 100644
--- a/arch/loongarch/include/uapi/asm/hwcap.h
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -17,5 +17,6 @@
#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
+#define HWCAP_LOONGARCH_LSPW (1 << 14)
#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index f9abef382317..de6c3f18e40a 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define __KVM_HAVE_IRQ_LINE
+
/*
* KVM LoongArch specific structures and definitions.
*
@@ -64,6 +66,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
@@ -77,11 +80,36 @@ struct kvm_fpu {
/* Debugging: Special instruction for software breakpoint */
#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+/* LBT registers */
+#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
+
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vm fd */
+#define KVM_LOONGARCH_VM_FEAT_CTRL 0
+#define KVM_LOONGARCH_VM_FEAT_LSX 0
+#define KVM_LOONGARCH_VM_FEAT_LASX 1
+#define KVM_LOONGARCH_VM_FEAT_X86BT 2
+#define KVM_LOONGARCH_VM_FEAT_ARMBT 3
+#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4
+#define KVM_LOONGARCH_VM_FEAT_PMU 5
+#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
+#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
+#define KVM_LOONGARCH_VM_FEAT_PTW 8
+#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
+
+/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
+#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
struct kvm_debug_exit_arch {
};
@@ -108,4 +136,22 @@ struct kvm_iocsr_entry {
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
+#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000001
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000002
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000003
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000004
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3
+
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005
+#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000006
+#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0
+
#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
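
These group/attribute constants plug into KVM's generic device-control API: a VMM creates the in-kernel irqchip with KVM_CREATE_DEVICE and then configures it through KVM_SET_DEVICE_ATTR. A hedged sketch of setting the EXTIOI routable vCPU count; dev_fd is assumed to be such a device fd, and the constants are spelled out in case older uapi headers lack them:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int extioi_set_num_cpu(int dev_fd, uint32_t num_cpu)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = 0x40000004;	/* KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL */
	attr.attr = 0x0;		/* ..._EXTIOI_CTRL_INIT_NUM_CPU */
	attr.addr = (uint64_t)(unsigned long)&num_cpu;

	return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}
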
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000000..76d802ef01ce
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_KVM_PARA_H
+#define _UAPI_ASM_KVM_PARA_H
+
+#include <linux/types.h>
+
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE 0x40000000
+#define CPUCFG_KVM_SIZE 0x100
+#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
+#define KVM_SIGNATURE "KVM\0"
+#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_IPI 1
+#define KVM_FEATURE_STEAL_TIME 2
+/* Bits 24 - 31 are features configurable by the user-space VMM */
+#define KVM_FEATURE_VIRT_EXTIOI 24
+#define KVM_FEATURE_USER_HCALL 25
+
+#endif /* _UAPI_ASM_KVM_PARA_H */
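
A guest can detect this interface by reading the CPUCFG leaf at CPUCFG_KVM_SIG and comparing it against the "KVM\0" signature. A hedged guest-side sketch, assuming a LoongArch toolchain; read_cpucfg() here mirrors the kernel's helper of the same name:

static inline unsigned int read_cpucfg(unsigned int reg)
{
	unsigned int val;

	__asm__ __volatile__("cpucfg %0, %1" : "=r"(val) : "r"(reg));
	return val;
}

static int kvm_para_available(void)
{
	unsigned int sig = read_cpucfg(0x40000000);	/* CPUCFG_KVM_SIG */

	return sig == *(const unsigned int *)"KVM\0";	/* little-endian compare */
}
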
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index ac915f841650..215e0f9e8aa3 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -10,10 +10,6 @@
#include <linux/types.h>
-#ifndef __KERNEL__
-#include <stdint.h>
-#endif
-
/*
* For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
* 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
@@ -41,37 +37,47 @@ struct user_pt_regs {
} __attribute__((aligned(8)));
struct user_fp_state {
- uint64_t fpr[32];
- uint64_t fcc;
- uint32_t fcsr;
+ __u64 fpr[32];
+ __u64 fcc;
+ __u32 fcsr;
};
struct user_lsx_state {
/* 32 registers, 128 bits width per register. */
- uint64_t vregs[32*2];
+ __u64 vregs[32*2];
};
struct user_lasx_state {
/* 32 registers, 256 bits width per register. */
- uint64_t vregs[32*4];
+ __u64 vregs[32*4];
};
struct user_lbt_state {
- uint64_t scr[4];
- uint32_t eflags;
- uint32_t ftop;
+ __u64 scr[4];
+ __u32 eflags;
+ __u32 ftop;
};
struct user_watch_state {
- uint64_t dbg_info;
+ __u64 dbg_info;
struct {
- uint64_t addr;
- uint64_t mask;
- uint32_t ctrl;
- uint32_t pad;
+ __u64 addr;
+ __u64 mask;
+ __u32 ctrl;
+ __u32 pad;
} dbg_regs[8];
};
+struct user_watch_state_v2 {
+ __u64 dbg_info;
+ struct {
+ __u64 addr;
+ __u64 mask;
+ __u32 ctrl;
+ __u32 pad;
+ } dbg_regs[14];
+};
+
#define PTRACE_SYSEMU 0x1f
#define PTRACE_SYSEMU_SINGLESTEP 0x20
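
user_watch_state_v2 widens dbg_regs[] from 8 to 14 entries to match the hardware maximum of 14 breakpoint/watchpoint slots (see the hw_breakpoint.c changes below). A debugger fetches it via the regset interface; a hedged sketch, assuming NT_LOONGARCH_HW_WATCH is available and `pid` is a traced, stopped child:

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>

/* Redeclared from <asm/ptrace.h> so the sketch is self-contained. */
struct user_watch_state_v2 {
	unsigned long long dbg_info;
	struct {
		unsigned long long addr, mask;
		unsigned int ctrl, pad;
	} dbg_regs[14];
};

static int dump_watch_slots(pid_t pid)
{
	struct user_watch_state_v2 ws;
	struct iovec iov = { .iov_base = &ws, .iov_len = sizeof(ws) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_LOONGARCH_HW_WATCH, &iov) < 0)
		return -1;

	printf("%zu watch slots returned\n",
	       (iov.iov_len - sizeof(ws.dbg_info)) / sizeof(ws.dbg_regs[0]));
	return 0;
}
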
diff --git a/arch/loongarch/include/uapi/asm/setup.h b/arch/loongarch/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..d46363ce3e02
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_LOONGARCH_SETUP_H
+#define _UAPI_ASM_LOONGARCH_SETUP_H
+
+#define COMMAND_LINE_SIZE 4096
+
+#endif /* _UAPI_ASM_LOONGARCH_SETUP_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 6c22f616b8f1..5cd121275bac 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -9,7 +9,6 @@
#define _UAPI_ASM_SIGCONTEXT_H
#include <linux/types.h>
-#include <linux/posix_types.h>
/* FP context was used */
#define SC_USED_FP (1 << 0)
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index fcb668984f03..1f01980f9c94 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,5 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c9bfeda89e40..001924877772 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -5,12 +5,12 @@
OBJECT_FILES_NON_STANDARD_head.o := y
-extra-y := vmlinux.lds
+always-$(KBUILD_BUILTIN) := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
- traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \
elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
- alternative.o unwind.o
+ alternative.o kdebugfs.o unwind.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_EFI) += efi.o
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
@@ -62,6 +62,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_efi.o kexec_elf.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..ab7d9baa2915
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# No special ABIs on loongarch so far
+syscall_abis_64 +=
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 5cf59c617126..1367ca759468 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -9,6 +9,8 @@
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -58,38 +60,47 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
}
#ifdef CONFIG_SMP
-static int set_processor_mask(u32 id, u32 flags)
+static int set_processor_mask(u32 id, u32 pass)
{
+ int cpu = -1, cpuid = id;
- int cpu, cpuid = id;
-
- if (num_processors >= nr_cpu_ids) {
- pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
- " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+ if (num_processors >= NR_CPUS) {
+ pr_warn(PREFIX "nr_cpus limit of %i reached."
+ " processor 0x%x ignored.\n", NR_CPUS, cpuid);
return -ENODEV;
}
+
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- else
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- if (flags & ACPI_MADT_ENABLED) {
+ switch (pass) {
+ case 1: /* Pass 1: handle enabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
- set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
- __cpu_number_map[cpuid] = cpu;
- __cpu_logical_map[cpu] = cpuid;
- } else
+ break;
+ case 2: /* Pass 2: handle disabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
disabled_cpus++;
+ break;
+ default:
+ return cpu;
+ }
+
+ set_cpu_possible(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
return cpu;
}
#endif
static int __init
-acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_core_pic *processor = NULL;
@@ -100,13 +111,30 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
acpi_core_pic[processor->core_id] = *processor;
- set_processor_mask(processor->core_id, processor->flags);
+ if (processor->flags & ACPI_MADT_ENABLED)
+ set_processor_mask(processor->core_id, 1);
#endif
return 0;
}
static int __init
+acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+#ifdef CONFIG_SMP
+ if (!(processor->flags & ACPI_MADT_ENABLED))
+ set_processor_mask(processor->core_id, 2);
+#endif
+
+ return 0;
+}
+static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
static int core = 0;
@@ -133,7 +161,10 @@ static void __init acpi_process_madt(void)
}
#endif
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
- acpi_parse_processor, MAX_CORE_PIC);
+ acpi_parse_p1_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_p2_processor, MAX_CORE_PIC);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
@@ -202,6 +233,9 @@ void __init acpi_boot_table_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+ if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+
return;
fdt_earlycon:
@@ -211,34 +245,6 @@ fdt_earlycon:
#ifdef CONFIG_ACPI_NUMA
-static __init int setup_node(int pxm)
-{
- return acpi_map_pxm_to_node(pxm);
-}
-
-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-unsigned int numa_distance_cnt;
-
-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
-{
- return slit->locality_count;
-}
-
-void __init numa_set_distance(int from, int to, int distance)
-{
- if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- node_distances[from][to] = distance;
-}
-
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
@@ -259,7 +265,41 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pxm |= (pa->proximity_domain_hi[1] << 16);
pxm |= (pa->proximity_domain_hi[2] << 24);
}
- node = setup_node(pxm);
+ node = acpi_map_pxm_to_node(pxm);
+ if (node < 0) {
+ pr_err("SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ if (pa->apic_id >= CONFIG_NR_CPUS) {
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+ pxm, pa->apic_id, node);
+ return;
+ }
+
+ early_numa_add_cpu(pa->apic_id, node);
+
+ set_cpuid_to_node(pa->apic_id, node);
+ node_set(node, numa_nodes_parsed);
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+void __init
+acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+ if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
+ bad_srat();
+ return;
+ }
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain;
+ node = acpi_map_pxm_to_node(pxm);
if (node < 0) {
pr_err("SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
@@ -296,6 +336,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
int nid;
nid = acpi_get_node(handle);
+
+ if (nid != NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
+
if (nid != NUMA_NO_NODE) {
set_cpuid_to_node(physid, nid);
node_set(nid, numa_nodes_parsed);
@@ -310,12 +354,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
{
int cpu;
- cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
- if (cpu < 0) {
+ cpu = cpu_number_map(physid);
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
- return cpu;
+ return -ERANGE;
}
+ num_processors++;
+ set_cpu_present(cpu, true);
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
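
The rework splits MADT parsing into two passes so that enabled processors claim the low logical CPU ids first and are marked both present and possible, while disabled (hot-pluggable) processors are appended afterwards as possible only. A toy standalone simulation of that ordering; the bitmap helpers are simplified stand-ins for the cpumask API:

#include <stdio.h>

#define NR	8

static unsigned int present, possible;

static int first_zero(unsigned int mask)
{
	int i;

	for (i = 0; i < NR; i++)
		if (!(mask & (1U << i)))
			return i;
	return -1;
}

int main(void)
{
	int i;

	/* pass 1: three enabled cores become present + possible */
	for (i = 0; i < 3; i++) {
		int cpu = first_zero(present);

		present |= 1U << cpu;
		possible |= 1U << cpu;
	}
	/* pass 2: two disabled cores become possible only */
	for (i = 0; i < 2; i++)
		possible |= 1U << first_zero(possible);

	printf("present=0x%x possible=0x%x\n", present, possible);	/* 0x7 0x1f */
	return 0;
}
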
diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
index 4ad13847e962..0e0c766df1e3 100644
--- a/arch/loongarch/kernel/alternative.c
+++ b/arch/loongarch/kernel/alternative.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index bee9f7a3108f..3017c7157600 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -4,6 +4,8 @@
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#define COMPILE_OFFSETS
+
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -14,6 +16,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/ftrace.h>
+#include <vdso/datapage.h>
static void __used output_ptreg_defines(void)
{
@@ -279,18 +282,6 @@ static void __used output_pbe_defines(void)
}
#endif
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __used output_fgraph_ret_regs_defines(void)
-{
- COMMENT("LoongArch fgraph_ret_regs offsets.");
- OFFSET(FGRET_REGS_A0, fgraph_ret_regs, regs[0]);
- OFFSET(FGRET_REGS_A1, fgraph_ret_regs, regs[1]);
- OFFSET(FGRET_REGS_FP, fgraph_ret_regs, fp);
- DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs));
- BLANK();
-}
-#endif
-
static void __used output_kvm_defines(void)
{
COMMENT("KVM/LoongArch Specific offsets.");
@@ -307,6 +298,7 @@ static void __used output_kvm_defines(void)
OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp);
OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp);
OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd);
+ OFFSET(KVM_ARCH_KVMPGD, kvm_vcpu_arch, kvm_pgd);
OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit);
OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry);
OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry);
@@ -321,3 +313,11 @@ static void __used output_kvm_defines(void)
OFFSET(KVM_GPGD, kvm, arch.pgd);
BLANK();
}
+
+static void __used output_vdso_defines(void)
+{
+ COMMENT("LoongArch vDSO offsets.");
+
+ DEFINE(__VDSO_PAGES, VDSO_NR_PAGES);
+ BLANK();
+}
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
index c7988f757281..8e231b0d2cd6 100644
--- a/arch/loongarch/kernel/cacheinfo.c
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -51,6 +51,12 @@ static void cache_cpumap_setup(unsigned int cpu)
continue;
sib_leaf = sib_cpu_ci->info_list + index;
+ /* SMT cores share all caches */
+ if (cpus_are_siblings(i, cpu)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ /* Cores of the same node share the shared (last-level) caches */
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 55320813ee08..a2060a24b39f 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -52,6 +52,48 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
}
+/* simd = -1/0/128/256 */
+static unsigned int simd = -1U;
+
+static int __init cpu_setup_simd(char *str)
+{
+ get_option(&str, &simd);
+ pr_info("Set SIMD width = %u\n", simd);
+
+ return 0;
+}
+
+early_param("simd", cpu_setup_simd);
+
+static int __init cpu_final_simd(void)
+{
+ struct cpuinfo_loongarch *c = &cpu_data[0];
+
+ if (simd < 128) {
+ c->options &= ~LOONGARCH_CPU_LSX;
+ elf_hwcap &= ~HWCAP_LOONGARCH_LSX;
+ }
+
+ if (simd < 256) {
+ c->options &= ~LOONGARCH_CPU_LASX;
+ elf_hwcap &= ~HWCAP_LOONGARCH_LASX;
+ }
+
+ simd = 0;
+
+ if (c->options & LOONGARCH_CPU_LSX)
+ simd = 128;
+
+ if (c->options & LOONGARCH_CPU_LASX)
+ simd = 256;
+
+ pr_info("Final SIMD width = %u\n", simd);
+
+ return 0;
+}
+
+arch_initcall(cpu_final_simd);
+
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
@@ -91,12 +133,32 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
unsigned int config;
unsigned long asid_mask;
- c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
- LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
+
+ switch (config & CPUCFG1_ISA) {
+ case 0:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32R);
+ break;
+ case 1:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ break;
+ case 2:
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ break;
+ default:
+ pr_warn("Warning: unknown ISA level\n");
+ }
+
+ if (config & CPUCFG1_PAGING)
+ c->options |= LOONGARCH_CPU_TLB;
+ if (config & CPUCFG1_IOCSR)
+ c->options |= LOONGARCH_CPU_IOCSR;
+ if (config & CPUCFG1_MSGINT)
+ c->options |= LOONGARCH_CPU_MSGINT;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -106,7 +168,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
}
-
config = read_cpucfg(LOONGARCH_CPUCFG2);
if (config & CPUCFG2_LAM) {
c->options |= LOONGARCH_CPU_LAM;
@@ -117,13 +178,13 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_FPU;
}
#ifdef CONFIG_CPU_HAS_LSX
- if (config & CPUCFG2_LSX) {
+ if ((config & CPUCFG2_LSX) && (simd >= 128)) {
c->options |= LOONGARCH_CPU_LSX;
elf_hwcap |= HWCAP_LOONGARCH_LSX;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
- if (config & CPUCFG2_LASX) {
+ if ((config & CPUCFG2_LASX) && (simd >= 256)) {
c->options |= LOONGARCH_CPU_LASX;
elf_hwcap |= HWCAP_LOONGARCH_LASX;
}
@@ -140,6 +201,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
+ if (config & CPUCFG2_LSPW) {
+ c->options |= LOONGARCH_CPU_LSPW;
+ elf_hwcap |= HWCAP_LOONGARCH_LSPW;
+ }
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
@@ -163,26 +228,13 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
- config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
- if (config & IOCSRF_CSRIPI)
- c->options |= LOONGARCH_CPU_CSRIPI;
- if (config & IOCSRF_EXTIOI)
- c->options |= LOONGARCH_CPU_EXTIOI;
- if (config & IOCSRF_FREQSCALE)
- c->options |= LOONGARCH_CPU_SCALEFREQ;
- if (config & IOCSRF_FLATMODE)
- c->options |= LOONGARCH_CPU_FLATMODE;
- if (config & IOCSRF_EIODECODE)
- c->options |= LOONGARCH_CPU_EIODECODE;
- if (config & IOCSRF_VM)
- c->options |= LOONGARCH_CPU_HYPERVISOR;
-
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
set_cpu_asid_mask(c, asid_mask);
config = read_csr_prcfg1();
+ c->timerbits = (config & CSR_CONF1_TMRBITS) >> CSR_CONF1_TMRBITS_SHIFT;
c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
@@ -209,6 +261,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
default:
pr_warn("Warning: unknown TLB type\n");
}
+
+ if (get_num_brps() + get_num_wrps())
+ c->options |= LOONGARCH_CPU_WATCH;
}
#define MAX_NAME_LEN 32
@@ -219,52 +274,57 @@ static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
+ uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+ const char *core_name = id_to_core_name(c->processor_id);
- if (!__cpu_full_name[cpu])
- __cpu_full_name[cpu] = cpu_full_name;
-
- *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
- *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
-
- switch (c->processor_id & PRID_SERIES_MASK) {
- case PRID_SERIES_LA132:
+ switch (BIT(fls(c->isa_level) - 1)) {
+ case LOONGARCH_CPU_ISA_LA32R:
+ case LOONGARCH_CPU_ISA_LA32S:
c->cputype = CPU_LOONGSON32;
- set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
- pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
- break;
- case PRID_SERIES_LA264:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
break;
- case PRID_SERIES_LA364:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
- break;
- case PRID_SERIES_LA464:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
- break;
- case PRID_SERIES_LA664:
+ case LOONGARCH_CPU_ISA_LA64:
c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
break;
- default: /* Default to 64 bit */
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
+
+ pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
+
+ if (!cpu_has_iocsr) {
+ __cpu_full_name[cpu] = "Unknown";
+ return;
+ }
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ if (!__cpu_full_name[cpu]) {
+ if (((char *)vendor)[0] == 0)
+ __cpu_full_name[cpu] = "Unknown";
+ else
+ __cpu_full_name[cpu] = cpu_full_name;
+ }
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_AVEC)
+ c->options |= LOONGARCH_CPU_AVECINT;
+ if (config & IOCSRF_REDIRECT)
+ c->options |= LOONGARCH_CPU_REDIRECTINT;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
}
#ifdef CONFIG_64BIT
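
The simd= early parameter caps the usable SIMD width: any value below 128 drops LSX, any value below 256 drops LASX, and the final width is recomputed from whichever units survived both the cap and the cpucfg probe. A standalone sketch of that clamping; the booleans stand in for the CPUCFG2_LSX/CPUCFG2_LASX probe results:

#include <stdio.h>

static unsigned int final_simd(unsigned int simd, int lsx, int lasx)
{
	if (simd < 128)
		lsx = lasx = 0;		/* e.g. simd=0 disables everything */
	if (simd < 256)
		lasx = 0;		/* e.g. simd=128 keeps LSX only */

	return lasx ? 256 : (lsx ? 128 : 0);
}

int main(void)
{
	printf("simd=128 on LASX hw -> %u\n", final_simd(128, 1, 1));	/* 128 */
	printf("simd=0   on LASX hw -> %u\n", final_simd(0, 1, 1));	/* 0 */
	return 0;
}
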
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index 5f23b85d78ca..ba0bdbf86aa8 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -7,7 +7,7 @@
#include <linux/sizes.h>
.macro __EFI_PE_HEADER
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.Lcoff_header:
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
.short .Lsection_count /* NumberOfSections */
@@ -20,7 +20,7 @@
IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
.Loptional_header:
- .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */
.byte 0x02 /* MajorLinkerVersion */
.byte 0x14 /* MinorLinkerVersion */
.long __inittext_end - .Lefi_header_end /* SizeOfCode */
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 000825406c1f..860a3bc030e0 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
+bool efi_poweroff_required(void)
+{
+ return efi_enabled(EFI_RUNTIME_SERVICES) &&
+ (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
#if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
@@ -89,7 +95,7 @@ static void __init init_screen_info(void)
memset(si, 0, sizeof(*si));
early_memunmap(si, sizeof(*si));
- memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+ memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size);
}
void __init efi_init(void)
@@ -138,6 +144,18 @@ void __init efi_init(void)
if (efi_memmap_init_early(&data) < 0)
panic("Unable to map EFI memory map.\n");
+ /*
+ * Reserve the physical memory region occupied by the EFI
+ * memory map table (header + descriptors). This is crucial
+ * for kdump, as the kdump kernel relies on this original
+ * memmap passed by the bootloader. Without reservation,
+ * this region could be overwritten by the primary kernel.
+ * Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that
+ * critical boot services code/data regions like this are preserved.
+ */
+ memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size);
+ set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);
+
early_memunmap(tbl, sizeof(*tbl));
}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
index 0fa81ced28dc..3d98c6aa00db 100644
--- a/arch/loongarch/kernel/elf.c
+++ b/arch/loongarch/kernel/elf.c
@@ -6,7 +6,6 @@
#include <linux/binfmts.h>
#include <linux/elf.h>
-#include <linux/export.h>
#include <linux/sched.h>
#include <asm/cpu-features.h>
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 48e7e34e355e..47e1db9a1ce4 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -73,28 +73,29 @@ SYM_CODE_START(handle_syscall)
move a0, sp
bl do_syscall
+ STACKLEAK_ERASE
RESTORE_ALL_AND_RET
SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ bl ret_from_fork
+ STACKLEAK_ERASE
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
-SYM_CODE_START(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_kernel_thread_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, s1
- jirl ra, s0, 0
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ move a2, s0
+ move a3, s1
+ bl ret_from_kernel_thread
+ STACKLEAK_ERASE
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_kernel_thread)
+SYM_CODE_END(ret_from_kernel_thread_asm)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..23bd5ae2212c 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -39,16 +39,19 @@ void __init init_environ(void)
static int __init init_cpu_fullname(void)
{
- struct device_node *root;
int cpu, ret;
- char *model;
+ char *cpuname;
+ const char *model;
+ struct device_node *root;
/* Parsing cpuname from DTS model property */
root = of_find_node_by_path("/");
- ret = of_property_read_string(root, "model", (const char **)&model);
+ ret = of_property_read_string(root, "model", &model);
+ if (ret == 0) {
+ cpuname = kstrdup(model, GFP_KERNEL);
+ loongson_sysconf.cpuname = strsep(&cpuname, " ");
+ }
of_node_put(root);
- if (ret == 0)
- loongson_sysconf.cpuname = strsep(&model, " ");
if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
for (cpu = 0; cpu < NR_CPUS; cpu++)
@@ -68,6 +71,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
@@ -81,7 +86,7 @@ late_initcall(fdt_cpu_clk_init);
static ssize_t boardinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf,
+ return sysfs_emit(buf,
"BIOS Information\n"
"Vendor\t\t\t: %s\n"
"Version\t\t\t: %s\n"
@@ -104,6 +109,8 @@ static int __init boardinfo_init(void)
struct kobject *loongson_kobj;
loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+ if (!loongson_kobj)
+ return -ENOMEM;
return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
}
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 69a85f2479fb..28caf416ae36 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
/*
* a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
/*
* a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
/*
* a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
/*
* a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
/*
* a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
@@ -530,6 +536,10 @@ SYM_FUNC_END(_restore_lasx_context)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD _restore_fp
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD _restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD _restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index bff058317062..d5d81d74034c 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -85,14 +85,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
- * NOTE: __module_text_address() must be called with preemption
- * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+ * NOTE: __module_text_address() must be called within an RCU read
+ * section, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
- preempt_disable();
- mod = __module_text_address(pc);
- preempt_enable();
+ scoped_guard(rcu)
+ mod = __module_text_address(pc);
}
if (WARN_ON(!mod))
@@ -241,10 +240,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
unsigned long *parent = (unsigned long *)&regs->regs[1];
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
- prepare_ftrace_return(ip, (unsigned long *)parent);
+ if (!function_graph_enter_regs(old, ip, 0, parent, fregs))
+ *parent = return_hooker;
}
#else
static int ftrace_modify_graph_caller(bool enable)
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 86d5d90ebefe..733a7665e434 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -16,30 +16,35 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>
+ .section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
- /* start of rollback region */
- LONG_L t0, tp, TI_FLAGS
- nop
- andi t0, t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
+ /* start of idle interrupt region */
+ ori t0, zero, CSR_CRMD_IE
+ /* idle instruction needs irq enabled */
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
+ /*
+ * If an interrupt lands here, between enabling interrupts above and
+ * going idle on the next instruction, we must *NOT* go idle since the
+ * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
+ * reprogramming. Fall through -- see handle_vint() below -- and have
+ * the idle loop take care of things.
+ */
idle 0
- /* end of rollback region */
-1: jr ra
+ /* end of idle interrupt region */
+idle_exit:
+ jr ra
SYM_FUNC_END(__arch_cpu_idle)
+ .previous
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
- la_abs t1, __arch_cpu_idle
+ la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
- /* 32 byte rollback region */
- ori t0, t0, 0x1f
- xori t0, t0, 0x1f
+ /* 3-instruction idle interrupt region */
+ ori t0, t0, 0b1100
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 4677ea8fa8e9..e3865e92a917 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -20,7 +20,7 @@
__HEAD
_head:
- .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */
.org 0x8
.dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
@@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
- li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
@@ -124,11 +120,8 @@ SYM_CODE_END(kernel_entry)
* function after setting up the stack and tp registers.
*/
SYM_CODE_START(smpboot_entry)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
#ifdef CONFIG_PAGE_SIZE_4KB
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index 621ad7634df7..c35f9bf38033 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type)
READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ READ_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 13, REG, T, VAL);
#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
@@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type)
WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);
static u64 read_wb_reg(int reg, int n, int t)
{
@@ -221,7 +233,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
- if (bp->hw.target)
+ if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
index 0b5dd2faeb90..54b247d8cdb6 100644
--- a/arch/loongarch/kernel/idle.c
+++ b/arch/loongarch/kernel/idle.c
@@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
- raw_local_irq_enable();
- __arch_cpu_idle(); /* idle instruction needs irq enabled */
+ __arch_cpu_idle();
raw_local_irq_disable();
}
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 3050329556d1..bf037f0c6b26 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -4,6 +4,8 @@
*/
#include <linux/sizes.h>
#include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
@@ -139,6 +141,9 @@ bool insns_not_supported(union loongarch_instruction insn)
case amswapw_op ... ammindbdu_op:
pr_notice("atomic memory access instructions are not supported\n");
return true;
+ case scq_op:
+ pr_notice("sc.q instruction is not supported\n");
+ return true;
}
switch (insn.reg2i14_format.opcode) {
@@ -150,6 +155,15 @@ bool insns_not_supported(union loongarch_instruction insn)
return true;
}
+ switch (insn.reg2_format.opcode) {
+ case llacqw_op:
+ case llacqd_op:
+ case screlw_op:
+ case screld_op:
+ pr_notice("llacq and screl instructions are not supported\n");
+ return true;
+ }
+
switch (insn.reg1i21_format.opcode) {
case bceqz_op:
pr_notice("bceqz and bcnez instructions are not supported\n");
@@ -218,6 +232,50 @@ int larch_insn_patch_text(void *addr, u32 insn)
return ret;
}
+struct insn_copy {
+ void *dst;
+ void *src;
+ size_t len;
+ unsigned int cpu;
+};
+
+static int text_copy_cb(void *data)
+{
+ int ret = 0;
+ struct insn_copy *copy = data;
+
+ if (smp_processor_id() == copy->cpu) {
+ ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
+ if (ret)
+ pr_err("%s: operation failed\n", __func__);
+ }
+
+ flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
+
+ return ret;
+}
+
+int larch_insn_text_copy(void *dst, void *src, size_t len)
+{
+ int ret = 0;
+ size_t start, end;
+ struct insn_copy copy = {
+ .dst = dst,
+ .src = src,
+ .len = len,
+ .cpu = smp_processor_id(),
+ };
+
+ start = round_down((size_t)dst, PAGE_SIZE);
+ end = round_up((size_t)dst + len, PAGE_SIZE);
+
+ set_memory_rw(start, (end - start) / PAGE_SIZE);
+ ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+ set_memory_rox(start, (end - start) / PAGE_SIZE);
+
+ return ret;
+}
+
u32 larch_insn_gen_nop(void)
{
return INSN_NOP;
@@ -323,6 +381,34 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
return insn.word;
}
+u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
+ pr_warn("The generated beq instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_beq(&insn, rj, rd, imm >> 2);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
+ pr_warn("The generated bne instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_bne(&insn, rj, rd, imm >> 2);
+
+ return insn.word;
+}
+
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
{
union loongarch_instruction insn;
@@ -332,7 +418,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
return INSN_BREAK;
}
- emit_jirl(&insn, rj, rd, imm >> 2);
+ emit_jirl(&insn, rd, rj, imm >> 2);
return insn.word;
}
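Note on the inst.c changes: larch_insn_text_copy() makes the page range writable, copies the new instructions on one CPU under stop_machine() while every CPU flushes its icache, then restores read-only-execute; the emit_jirl() call fixes a swapped rd/rj argument order; and the new beq/bne generators validate that the byte offset is 4-byte aligned and fits the 16-bit signed word-offset field, i.e. lies within [-128 KiB, +128 KiB). A worked example of the range check (userspace-style asserts, for illustration only):

        assert(larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_A1, 0x1fffc) != INSN_BREAK); /* last in-range offset */
        assert(larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_A1, 0x20000) == INSN_BREAK); /* == SZ_128K, rejected */
        assert(larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_A1, 0x6)     == INSN_BREAK); /* misaligned */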
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
deleted file mode 100644
index cb85bda5a6ad..000000000000
--- a/arch/loongarch/kernel/io.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-/*
- * Copy data from IO memory space to "real" memory space.
- */
-void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)from, 8)) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-
- while (count >= 8) {
- *(u64 *)to = __raw_readq(from);
- from += 8;
- to += 8;
- count -= 8;
- }
-
- while (count) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_fromio);
-
-/*
- * Copy data from "real" memory space to IO memory space.
- */
-void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)to, 8)) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-
- while (count >= 8) {
- __raw_writeq(*(u64 *)from, to);
- from += 8;
- to += 8;
- count -= 8;
- }
-
- while (count) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_toio);
-
-/*
- * "memset" on IO memory space.
- */
-void __memset_io(volatile void __iomem *dst, int c, size_t count)
-{
- u64 qc = (u8)c;
-
- qc |= qc << 8;
- qc |= qc << 16;
- qc |= qc << 32;
-
- while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-
- while (count >= 8) {
- __raw_writeq(qc, dst);
- dst += 8;
- count -= 8;
- }
-
- while (count) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memset_io);
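Note on the io.c removal: the arch-private MMIO copy/fill helpers are dropped, presumably in favor of generic implementations. All three used the same head/bulk/tail pattern (byte accesses until 8-byte alignment, 64-bit accesses for the bulk, byte accesses for the tail), and the deleted __memset_io built its 64-bit fill word by doubling the byte value:

        u64 qc = (u8)0xab;
        qc |= qc << 8;    /* 0x000000000000abab */
        qc |= qc << 16;   /* 0x00000000abababab */
        qc |= qc << 32;   /* 0xabababababababab */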
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index f4991c03514f..80946cafaec1 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -87,6 +87,18 @@ static void __init init_vec_parent_group(void)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
+int __init arch_probe_nr_irqs(void)
+{
+ int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
+
+ if (!cpu_has_avecint)
+ irq_set_nr_irqs(64 + NR_VECTORS * nr_io_pics);
+ else
+ irq_set_nr_irqs(64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
+
+ return NR_IRQS_LEGACY;
+}
+
void __init init_IRQ(void)
{
int i;
@@ -102,9 +114,6 @@ void __init init_IRQ(void)
mp_ops.init_ipi();
#endif
- for (i = 0; i < NR_IRQS; i++)
- irq_set_noprobe(i);
-
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
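Note on the irq.c change: arch_probe_nr_irqs() now sizes the IRQ descriptor space up front from the number of I/O interrupt masters and, when AVEC interrupts are available, the number of possible CPUs. A worked example, assuming NR_VECTORS is 256:

        /* 4 I/O masters, 64 possible CPUs, cpu_has_avecint set:    */
        /*   nr_irqs = 64 + 256 * (64 + 4) = 17472                  */
        /* without AVEC interrupts:                                 */
        /*   nr_irqs = 64 + 256 * 4        = 1088                   */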
diff --git a/arch/loongarch/kernel/kdebugfs.c b/arch/loongarch/kernel/kdebugfs.c
new file mode 100644
index 000000000000..80cf64772399
--- /dev/null
+++ b/arch/loongarch/kernel/kdebugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/kstrtox.h>
+#include <asm/loongarch.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int sfb_state, tso_state;
+
+static void set_sfb_state(void *info)
+{
+ int val = *(int *)info << CSR_STFILL_SHIFT;
+
+ csr_xchg32(val, CSR_STFILL, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t sfb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[32];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %x\nCurrent State: %x\n", sfb_state, state);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t sfb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1:
+ on_each_cpu(set_sfb_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations sfb_fops = {
+ .read = sfb_read,
+ .write = sfb_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+#define LDSTORDER_NLD_NST 0x0 /* 000 = No Load No Store */
+#define LDSTORDER_ALD_NST 0x1 /* 001 = All Load No Store */
+#define LDSTORDER_SLD_NST 0x3 /* 011 = Same Load No Store */
+#define LDSTORDER_NLD_AST 0x4 /* 100 = No Load All Store */
+#define LDSTORDER_ALD_AST 0x5 /* 101 = All Load All Store */
+#define LDSTORDER_SLD_AST 0x7 /* 111 = Same Load All Store */
+
+static char *tso_hints[] = {
+ "No Load No Store",
+ "All Load No Store",
+ "Invalid Config",
+ "Same Load No Store",
+ "No Load All Store",
+ "All Load All Store",
+ "Invalid Config",
+ "Same Load All Store"
+};
+
+static void set_tso_state(void *info)
+{
+ int val = *(int *)info << CSR_LDSTORDER_SHIFT;
+
+ csr_xchg32(val, CSR_LDSTORDER_MASK, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t tso_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[240];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %d (%s)\n"
+ "Current State: %d (%s)\n\n"
+ "Available States:\n"
+ "0 (%s)\t" "1 (%s)\t" "3 (%s)\n"
+ "4 (%s)\t" "5 (%s)\t" "7 (%s)\n",
+ tso_state, tso_hints[tso_state], state, tso_hints[state],
+ tso_hints[0], tso_hints[1], tso_hints[3], tso_hints[4], tso_hints[5], tso_hints[7]);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t tso_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1: case 3:
+ case 4: case 5: case 7:
+ on_each_cpu(set_tso_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations tso_fops = {
+ .read = tso_read,
+ .write = tso_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+static int __init arch_kdebugfs_init(void)
+{
+ unsigned int config = read_cpucfg(LOONGARCH_CPUCFG3);
+
+ arch_debugfs_dir = debugfs_create_dir("loongarch", NULL);
+
+ if (config & CPUCFG3_SFB) {
+ debugfs_create_file("sfb_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &sfb_state, &sfb_fops);
+ sfb_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+ }
+
+ if (config & (CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP)) {
+ debugfs_create_file("tso_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &tso_state, &tso_fops);
+ tso_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+ }
+
+ return 0;
+}
+postcore_initcall(arch_kdebugfs_init);
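Note on the new kdebugfs.c: it exposes two implementation-control knobs under /sys/kernel/debug/loongarch/ (store-fill buffer and load/store ordering), each applied on every CPU via on_each_cpu(); e.g. "echo 1 > /sys/kernel/debug/loongarch/sfb_state" enables the store-fill buffer system-wide. The hand-rolled *ppos handling in the read paths is equivalent to the stock helper; a sketch of sfb_read() expressed with it:

        static ssize_t sfb_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
        {
                char str[32];
                int state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
                int s = snprintf(str, sizeof(str), "Boot State: %x\nCurrent State: %x\n",
                                 sfb_state, state);

                return simple_read_from_buffer(buf, count, ppos, str, s);
        }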
diff --git a/arch/loongarch/kernel/kexec_efi.c b/arch/loongarch/kernel/kexec_efi.c
new file mode 100644
index 000000000000..5ee78ebb1546
--- /dev/null
+++ b/arch/loongarch/kernel/kexec_efi.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Load EFI vmlinux file for the kexec_file_load syscall.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#define pr_fmt(fmt) "kexec_file(EFI): " fmt
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/pe.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/image.h>
+
+static int efi_kexec_probe(const char *kernel_buf, unsigned long kernel_len)
+{
+ const struct loongarch_image_header *h = (const struct loongarch_image_header *)kernel_buf;
+
+ if (!h || (kernel_len < sizeof(*h))) {
+ kexec_dprintk("No LoongArch image header.\n");
+ return -EINVAL;
+ }
+
+ if (!loongarch_header_check_dos_sig(h)) {
+ kexec_dprintk("No LoongArch PE image header.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *efi_kexec_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ int ret;
+ unsigned long text_offset, kernel_segment_number;
+ struct kexec_buf kbuf = {};
+ struct kexec_segment *kernel_segment;
+ struct loongarch_image_header *h;
+
+ h = (struct loongarch_image_header *)kernel;
+ if (!h->kernel_asize)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Load the kernel
+ * FIXME: Non-relocatable kernels are rejected by kexec_file (CONFIG_RELOCATABLE is required)
+ */
+ kbuf.image = image;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kbuf.buffer = kernel;
+ kbuf.bufsz = kernel_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = le64_to_cpu(h->kernel_asize);
+ text_offset = le64_to_cpu(h->text_offset);
+ kbuf.buf_min = text_offset;
+ kbuf.buf_align = SZ_2M;
+
+ kernel_segment_number = image->nr_segments;
+
+ /*
+ * The location of the kernel segment may make it impossible to
+ * satisfy the other segment requirements, so we try repeatedly
+ * to find a location that will work.
+ */
+ while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+ /* Try to load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem,
+ kernel_segment->memsz, initrd,
+ initrd_len, cmdline, cmdline_len);
+ if (!ret)
+ break;
+
+ /*
+ * We couldn't find space for the other segments; erase the
+ * kernel segment and try the next available hole.
+ */
+ image->nr_segments -= 1;
+ kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ }
+
+ if (ret < 0) {
+ pr_err("Could not find any suitable kernel location!");
+ return ERR_PTR(ret);
+ }
+
+ kernel_segment = &image->segment[kernel_segment_number];
+
+ /* Make sure the second kernel jumps to the correct "kernel_entry" */
+ image->start = kernel_segment->mem + h->kernel_entry - text_offset;
+
+ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz);
+
+ return NULL;
+}
+
+const struct kexec_file_ops kexec_efi_ops = {
+ .probe = efi_kexec_probe,
+ .load = efi_kexec_load,
+};
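Note on the new kexec_efi.c: probe only sanity-checks the buffer for the PE/COFF "MZ" magic, and load places the kernel at its preferred text_offset but keeps retrying above each failed placement until the initrd/cmdline segments also fit. A sketch of the signature test, assuming the struct loongarch_image_header layout from the image.h added in this series (field name assumed):

        static inline bool loongarch_header_check_dos_sig(const struct loongarch_image_header *h)
        {
                /* PE/COFF images start with the DOS magic "MZ" (assumed field) */
                return h->dos_sig[0] == 'M' && h->dos_sig[1] == 'Z';
        }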
diff --git a/arch/loongarch/kernel/kexec_elf.c b/arch/loongarch/kernel/kexec_elf.c
new file mode 100644
index 000000000000..1b6b64744c7f
--- /dev/null
+++ b/arch/loongarch/kernel/kexec_elf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#define pr_fmt(fmt) "kexec_file(ELF): " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <asm/setup.h>
+
+#define elf_kexec_probe kexec_elf_probe
+
+static int _elf_kexec_load(struct kimage *image,
+ struct elfhdr *ehdr, struct kexec_elf_info *elf_info,
+ struct kexec_buf *kbuf, unsigned long *text_offset)
+{
+ int i, ret = -1;
+
+ /* Read in the PT_LOAD segments. */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ size_t size;
+ const struct elf_phdr *phdr;
+
+ phdr = &elf_info->proghdrs[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ size = phdr->p_filesz;
+ if (size > phdr->p_memsz)
+ size = phdr->p_memsz;
+
+ kbuf->buffer = (void *)elf_info->buffer + phdr->p_offset;
+ kbuf->bufsz = size;
+ kbuf->buf_align = phdr->p_align;
+ *text_offset = __pa(phdr->p_paddr);
+ kbuf->buf_min = *text_offset;
+ kbuf->memsz = ALIGN(phdr->p_memsz, SZ_64K);
+ kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
+ ret = kexec_add_buffer(kbuf);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void *elf_kexec_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ int ret;
+ unsigned long text_offset, kernel_segment_number;
+ struct elfhdr ehdr;
+ struct kexec_buf kbuf = {};
+ struct kexec_elf_info elf_info;
+ struct kexec_segment *kernel_segment;
+
+ ret = kexec_build_elf_info(kernel, kernel_len, &ehdr, &elf_info);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Load the kernel
+ * FIXME: Non-relocatable kernels are rejected by kexec_file (CONFIG_RELOCATABLE is required)
+ */
+ kbuf.image = image;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kernel_segment_number = image->nr_segments;
+
+ ret = _elf_kexec_load(image, &ehdr, &elf_info, &kbuf, &text_offset);
+ if (ret < 0)
+ goto out;
+
+ /* Load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem, kernel_segment->memsz,
+ initrd, initrd_len, cmdline, cmdline_len);
+ if (ret < 0)
+ goto out;
+
+ /* Make sure the second kernel jumps to the correct "kernel_entry". */
+ image->start = kernel_segment->mem + __pa(ehdr.e_entry) - text_offset;
+
+ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz);
+
+out:
+ kexec_free_elf_info(&elf_info);
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+const struct kexec_file_ops kexec_elf_ops = {
+ .probe = elf_kexec_probe,
+ .load = elf_kexec_load,
+};
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index ec5b28e570c9..141b49bd989c 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -4,6 +4,7 @@
*/
#include <linux/cpu.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <asm/fpu.h>
#include <asm/smp.h>
@@ -18,11 +19,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
+static inline void fpregs_lock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ local_bh_enable();
+}
+
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
- preempt_disable();
+ if (!irqs_disabled())
+ fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu));
@@ -73,7 +91,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false);
- preempt_enable();
+ if (!irqs_disabled())
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
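Note on the kfpu.c change: taking local_bh_disable() (or preempt_disable() under PREEMPT_RT) instead of plain preempt_disable() lets kernel-mode FPU sections exclude softirqs as well, and the irqs_disabled() check lets callers that already run with hard IRQs off skip touching BH state entirely. The caller contract is unchanged:

        kernel_fpu_begin();   /* save current FPU/SIMD state, enable the unit */
        do_vector_work();     /* hypothetical SIMD routine */
        kernel_fpu_end();     /* restore state, drop the BH/preempt protection */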
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index 17b040bd6067..8ba391cfabb0 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -4,8 +4,8 @@
#include <linux/preempt.h>
#include <asm/break.h>
-#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP)
-#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
+#define KPROBE_BP_INSN __emit_break(BRK_KPROBE_BP)
+#define KPROBE_SSTEPBP_INSN __emit_break(BRK_KPROBE_SSTEPBP)
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
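Note on the kprobes.c change: __emit_break() is a constant expression, so the breakpoint opcodes fold at compile time instead of calling larch_insn_gen_break() at runtime; schematically (exact encoding lives in asm/inst.h, sketch only):

        #define __emit_break(imm)       (0x002a0000 | (imm))    /* "break imm", sketch */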
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
index 001f061d226a..71678912d24c 100644
--- a/arch/loongarch/kernel/lbt.S
+++ b/arch/loongarch/kernel/lbt.S
@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
/*
* a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
/*
* a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
/*
* a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 8ae641dc53bb..d7fafda1d541 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -39,49 +39,37 @@ static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
-static void kexec_image_info(const struct kimage *kimage)
-{
- unsigned long i;
-
- pr_debug("kexec kimage info:\n");
- pr_debug("\ttype: %d\n", kimage->type);
- pr_debug("\tstart: %lx\n", kimage->start);
- pr_debug("\thead: %lx\n", kimage->head);
- pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
-
- for (i = 0; i < kimage->nr_segments; i++) {
- pr_debug("\t segment[%lu]: %016lx - %016lx", i,
- kimage->segment[i].mem,
- kimage->segment[i].mem + kimage->segment[i].memsz);
- pr_debug("\t\t0x%lx bytes, %lu pages\n",
- (unsigned long)kimage->segment[i].memsz,
- (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
- }
-}
-
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
- kexec_image_info(kimage);
-
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
- /* Find the command line */
- for (i = 0; i < kimage->nr_segments; i++) {
- if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
- if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
- kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
- break;
+ if (kimage->file_mode == 1) {
+ /*
+ * kimage->cmdline_buf will be released in kexec_file_load, so copy
+ * to the KEXEC_CMDLINE_ADDR safe area.
+ */
+ memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr,
+ strlen((char *)kimage->arch.cmdline_ptr) + 1);
+ kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR;
+ } else {
+ /* Find the command line */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
+ if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
+ kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+ break;
+ }
}
- }
- if (!kimage->arch.cmdline_ptr) {
- pr_err("Command line not included in the provided image\n");
- return -EINVAL;
+ if (!kimage->arch.cmdline_ptr) {
+ pr_err("Command line not included in the provided image\n");
+ return -EINVAL;
+ }
}
/* kexec/kdump need a safe page to save reboot_code_buffer */
@@ -126,14 +114,14 @@ void kexec_reboot(void)
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
- unreachable();
+ BUG();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
- unreachable();
+ BUG();
}
@@ -249,6 +237,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
+ machine_kexec_mask_interrupts();
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
@@ -286,10 +275,12 @@ void machine_kexec(struct kimage *image)
/* We do not want to be bothered. */
local_irq_disable();
+ machine_kexec_mask_interrupts();
- pr_notice("EFI boot flag 0x%lx\n", efi_boot);
- pr_notice("Command line at 0x%lx\n", cmdline_ptr);
- pr_notice("System table at 0x%lx\n", systable_ptr);
+ pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
+ pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
+ pr_notice("Command line string: %s\n", (char *)cmdline_ptr);
+ pr_notice("System table addr: 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
diff --git a/arch/loongarch/kernel/machine_kexec_file.c b/arch/loongarch/kernel/machine_kexec_file.c
new file mode 100644
index 000000000000..fb57026f5f25
--- /dev/null
+++ b/arch/loongarch/kernel/machine_kexec_file.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kexec_file for LoongArch
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ *
+ * Most code is derived from LoongArch port of kexec-tools
+ */
+
+#define pr_fmt(fmt) "kexec_file: " fmt
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/bootinfo.h>
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_efi_ops,
+ &kexec_elf_ops,
+ NULL
+};
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
+/* Add the "kexec_file" command line parameter to command line. */
+static void cmdline_add_loader(unsigned long *cmdline_tmplen, char *modified_cmdline)
+{
+ int loader_strlen;
+
+ loader_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "kexec_file ");
+ *cmdline_tmplen += loader_strlen;
+}
+
+/* Add the "initrd=start,size" command line parameter to command line. */
+static void cmdline_add_initrd(struct kimage *image, unsigned long *cmdline_tmplen,
+ char *modified_cmdline, unsigned long initrd)
+{
+ int initrd_strlen;
+
+ initrd_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "initrd=0x%lx,0x%lx ",
+ initrd, image->initrd_buf_len);
+ *cmdline_tmplen += initrd_strlen;
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+ int ret, nr_ranges;
+ uint64_t i;
+ phys_addr_t start, end;
+ struct crash_mem *cmem;
+
+ nr_ranges = 2; /* for exclusion of crashkernel region */
+ for_each_mem_range(i, &start, &end)
+ nr_ranges++;
+
+ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
+ if (!cmem)
+ return -ENOMEM;
+
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+ for_each_mem_range(i, &start, &end) {
+ cmem->ranges[cmem->nr_ranges].start = start;
+ cmem->ranges[cmem->nr_ranges].end = end - 1;
+ cmem->nr_ranges++;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (ret < 0)
+ goto out;
+
+ if (crashk_low_res.end) {
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+out:
+ kfree(cmem);
+ return ret;
+}
+
+/*
+ * Add the "mem=size@start" command line parameter to command line, indicating the
+ * memory region the new kernel can use to boot into.
+ */
+static void cmdline_add_mem(unsigned long *cmdline_tmplen, char *modified_cmdline)
+{
+ int mem_strlen = 0;
+
+ mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ",
+ crashk_res.end - crashk_res.start + 1, crashk_res.start);
+ *cmdline_tmplen += mem_strlen;
+
+ if (crashk_low_res.end) {
+ mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ",
+ crashk_low_res.end - crashk_low_res.start + 1, crashk_low_res.start);
+ *cmdline_tmplen += mem_strlen;
+ }
+}
+
+/* Add the "elfcorehdr=size@start" command line parameter to command line. */
+static void cmdline_add_elfcorehdr(struct kimage *image, unsigned long *cmdline_tmplen,
+ char *modified_cmdline, unsigned long elfcorehdr_sz)
+{
+ int elfcorehdr_strlen = 0;
+
+ elfcorehdr_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "elfcorehdr=0x%lx@0x%lx ",
+ elfcorehdr_sz, image->elf_load_addr);
+ *cmdline_tmplen += elfcorehdr_strlen;
+}
+
+#endif
+
+/*
+ * Try to add the initrd to the image. If it is not possible to find valid
+ * locations, this function will undo changes to the image and return non zero.
+ */
+int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr, unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len)
+{
+ int ret = 0;
+ unsigned long cmdline_tmplen = 0;
+ unsigned long initrd_load_addr = 0;
+ unsigned long orig_segments = image->nr_segments;
+ char *modified_cmdline = NULL;
+ struct kexec_buf kbuf = {};
+
+ kbuf.image = image;
+ /* Don't allocate anything below the kernel */
+ kbuf.buf_min = kernel_load_addr + kernel_size;
+
+ modified_cmdline = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
+ if (!modified_cmdline)
+ return -EINVAL;
+
+ cmdline_add_loader(&cmdline_tmplen, modified_cmdline);
+ /* Ensure it's null terminated */
+ modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0';
+
+#ifdef CONFIG_CRASH_DUMP
+ /* Load elf core header */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ void *headers;
+ unsigned long headers_sz;
+
+ ret = prepare_elf_headers(&headers, &headers_sz);
+ if (ret < 0) {
+ pr_err("Preparing elf core header failed\n");
+ goto out_err;
+ }
+
+ kbuf.buffer = headers;
+ kbuf.bufsz = headers_sz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = headers_sz;
+ kbuf.buf_align = SZ_64K; /* largest supported page size */
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret < 0) {
+ vfree(headers);
+ goto out_err;
+ }
+ image->elf_headers = headers;
+ image->elf_load_addr = kbuf.mem;
+ image->elf_headers_sz = headers_sz;
+
+ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+
+ /* Add the mem=size@start parameter to the command line */
+ cmdline_add_mem(&cmdline_tmplen, modified_cmdline);
+
+ /* Add the elfcorehdr=size@start parameter to the command line */
+ cmdline_add_elfcorehdr(image, &cmdline_tmplen, modified_cmdline, headers_sz);
+ }
+#endif
+
+ /* Load initrd */
+ if (initrd) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = initrd_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = initrd_len;
+ kbuf.buf_align = 0;
+ /* within 1GB-aligned window of up to 32GB in size */
+ kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + (unsigned long)SZ_1G * 32;
+ kbuf.top_down = false;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret < 0)
+ goto out_err;
+ initrd_load_addr = kbuf.mem;
+
+ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, kbuf.bufsz, kbuf.memsz);
+
+ /* Add the initrd=start,size parameter to the command line */
+ cmdline_add_initrd(image, &cmdline_tmplen, modified_cmdline, initrd_load_addr);
+ }
+
+ if (cmdline_len + cmdline_tmplen > COMMAND_LINE_SIZE) {
+ pr_err("Appending command line exceeds COMMAND_LINE_SIZE\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(modified_cmdline + cmdline_tmplen, cmdline, cmdline_len);
+ cmdline = modified_cmdline;
+ image->arch.cmdline_ptr = (unsigned long)cmdline;
+
+ return 0;
+
+out_err:
+ image->nr_segments = orig_segments;
+ kfree(modified_cmdline);
+ return ret;
+}
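Note on the new machine_kexec_file.c: the loader prepends its own parameters ("kexec_file", plus "mem=" and "elfcorehdr=" for crash kernels, and "initrd=" when one is loaded) before appending the user-supplied command line, rejecting the result if it would exceed COMMAND_LINE_SIZE. An illustrative final string for a crash kernel (all addresses made up):

        "kexec_file mem=0x10000000@0x80000000 elfcorehdr=0x4000@0x8fffc000 initrd=0x90000000,0x1200000 console=ttyS0 root=/dev/vda1"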
diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S
index 3015896016a0..b6850503e061 100644
--- a/arch/loongarch/kernel/mcount.S
+++ b/arch/loongarch/kernel/mcount.S
@@ -79,10 +79,11 @@ SYM_FUNC_START(ftrace_graph_caller)
SYM_FUNC_END(ftrace_graph_caller)
SYM_FUNC_START(return_to_handler)
- PTR_ADDI sp, sp, -FGRET_REGS_SIZE
- PTR_S a0, sp, FGRET_REGS_A0
- PTR_S a1, sp, FGRET_REGS_A1
- PTR_S zero, sp, FGRET_REGS_FP
+ /* Save return value regs */
+ PTR_ADDI sp, sp, -PT_SIZE
+ PTR_S a0, sp, PT_R4
+ PTR_S a1, sp, PT_R5
+ PTR_S zero, sp, PT_R22
move a0, sp
bl ftrace_return_to_handler
@@ -90,9 +91,11 @@ SYM_FUNC_START(return_to_handler)
/* Restore the real parent address: a0 -> ra */
move ra, a0
- PTR_L a0, sp, FGRET_REGS_A0
- PTR_L a1, sp, FGRET_REGS_A1
- PTR_ADDI sp, sp, FGRET_REGS_SIZE
+ /* Restore return value regs */
+ PTR_L a0, sp, PT_R4
+ PTR_L a1, sp, PT_R5
+ PTR_ADDI sp, sp, PT_SIZE
+
jr ra
SYM_FUNC_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
index 0c65cf09110c..d6b474ad1d5e 100644
--- a/arch/loongarch/kernel/mcount_dyn.S
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -140,19 +140,19 @@ SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
/* Save return value regs */
- PTR_ADDI sp, sp, -FGRET_REGS_SIZE
- PTR_S a0, sp, FGRET_REGS_A0
- PTR_S a1, sp, FGRET_REGS_A1
- PTR_S zero, sp, FGRET_REGS_FP
+ PTR_ADDI sp, sp, -PT_SIZE
+ PTR_S a0, sp, PT_R4
+ PTR_S a1, sp, PT_R5
+ PTR_S zero, sp, PT_R22
move a0, sp
bl ftrace_return_to_handler
move ra, a0
/* Restore return value regs */
- PTR_L a0, sp, FGRET_REGS_A0
- PTR_L a1, sp, FGRET_REGS_A1
- PTR_ADDI sp, sp, FGRET_REGS_SIZE
+ PTR_L a0, sp, PT_R4
+ PTR_L a1, sp, PT_R5
+ PTR_ADDI sp, sp, PT_SIZE
jr ra
SYM_CODE_END(return_to_handler)
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
index aed901c57fb4..8ab1ffedc52c 100644
--- a/arch/loongarch/kernel/mem.c
+++ b/arch/loongarch/kernel/mem.c
@@ -13,7 +13,7 @@
void __init memblock_init(void)
{
u32 mem_type;
- u64 mem_start, mem_end, mem_size;
+ u64 mem_start, mem_size;
efi_memory_desc_t *md;
/* Parse memory information */
@@ -21,7 +21,6 @@ void __init memblock_init(void)
mem_type = md->type;
mem_start = md->phys_addr;
mem_size = md->num_pages << EFI_PAGE_SHIFT;
- mem_end = mem_start + mem_size;
switch (mem_type) {
case EFI_LOADER_CODE:
@@ -31,8 +30,6 @@ void __init memblock_init(void)
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
memblock_add(mem_start, mem_size);
- if (max_low_pfn < (mem_end >> PAGE_SHIFT))
- max_low_pfn = mem_end >> PAGE_SHIFT;
break;
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
@@ -49,6 +46,8 @@ void __init memblock_init(void)
}
}
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
/* Reserve the first 2MB */
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index e2f30ff9afde..a43ba7f9f987 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace.h>
+#include <linux/sort.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
return (Elf_Addr)&plt[nr];
}
-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
-{
- return x->r_info == y->r_info && x->r_addend == y->r_addend;
-}
+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
-static bool duplicate_rela(const Elf_Rela *rela, int idx)
+static int compare_rela(const void *x, const void *y)
{
- int i;
+ int ret;
+ const Elf_Rela *rela_x = x, *rela_y = y;
- for (i = 0; i < idx; i++) {
- if (is_rela_equal(&rela[i], &rela[idx]))
- return true;
- }
+ ret = cmp_3way(rela_x->r_info, rela_y->r_info);
+ if (ret == 0)
+ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
- return false;
+ return ret;
}
static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
- unsigned int i, type;
+ unsigned int i;
+
+ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
for (i = 0; i < num; i++) {
- type = ELF_R_TYPE(relas[i].r_info);
- switch (type) {
+ if (i && !compare_rela(&relas[i-1], &relas[i]))
+ continue;
+
+ switch (ELF_R_TYPE(relas[i].r_info)) {
case R_LARCH_SOP_PUSH_PLT_PCREL:
case R_LARCH_B26:
- if (!duplicate_rela(relas, i))
- (*plts)++;
+ (*plts)++;
break;
case R_LARCH_GOT_PC_HI20:
- if (!duplicate_rela(relas, i))
- (*gots)++;
+ (*gots)++;
break;
default:
break; /* Do nothing. */
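Note on the module-sections.c change: counting the needed PLT/GOT entries previously rescanned all earlier relocations for every entry (O(n^2)); sorting first and skipping entries equal to their predecessor makes it a single O(n log n) pass. The same pattern on plain integers, using the kernel's sort() (cmp_int is a hypothetical comparator):

        sort(vals, n, sizeof(vals[0]), cmp_int, NULL);
        for (i = 0; i < n; i++) {
                if (i && vals[i] == vals[i - 1])
                        continue;       /* duplicate of the previous entry */
                uniques++;
        }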
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 8fe21f868f72..8b89898e20df 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -11,6 +11,7 @@
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
+#include <linux/numa_memblks.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
@@ -27,13 +28,6 @@
#include <asm/time.h>
int numa_off;
-struct pglist_data *node_data[MAX_NUMNODES];
-unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(node_distances);
-
-static struct numa_meminfo numa_meminfo;
cpumask_t cpus_on_node[MAX_NUMNODES];
cpumask_t phys_cpus_on_node[MAX_NUMNODES];
EXPORT_SYMBOL(cpus_on_node);
@@ -46,8 +40,6 @@ s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
};
EXPORT_SYMBOL(__cpuid_to_node);
-nodemask_t numa_nodes_parsed __initdata;
-
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
@@ -148,66 +140,6 @@ void numa_remove_cpu(unsigned int cpu)
cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
}
-static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
- struct numa_meminfo *mi)
-{
- /* ignore zero length blks */
- if (start == end)
- return 0;
-
- /* whine about and ignore invalid blks */
- if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- nid, start, end - 1);
- return 0;
- }
-
- if (mi->nr_blks >= NR_NODE_MEMBLKS) {
- pr_err("NUMA: too many memblk ranges\n");
- return -EINVAL;
- }
-
- mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
- mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
- mi->blk[mi->nr_blks].nid = nid;
- mi->nr_blks++;
- return 0;
-}
-
-/**
- * numa_add_memblk - Add one numa_memblk to numa_meminfo
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * Add a new memblk to the default numa_meminfo.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- return numa_add_memblk_to(nid, start, end, &numa_meminfo);
-}
-
-static void __init alloc_node_data(int nid)
-{
- void *nd;
- unsigned long nd_pa;
- size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
-
- nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
- return;
- }
-
- nd = __va(nd_pa);
-
- node_data[nid] = nd;
- memset(nd, 0, sizeof(pg_data_t));
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
@@ -226,54 +158,9 @@ static void __init node_mem_init(unsigned int node)
#ifdef CONFIG_ACPI_NUMA
-static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
-{
- static unsigned long num_physpages;
-
- num_physpages += (size >> PAGE_SHIFT);
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
- node, type, start, size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
- memblock_set_node(start, size, &memblock.memory, node);
-}
-
-/*
- * add_numamem_region
- *
- * Add a usable memory region described by BIOS. The
- * routine gets each intersection between BIOS's region
- * and node's region, and adds them into node's memblock
- * pool.
- *
- */
-static void __init add_numamem_region(u64 start, u64 end, u32 type)
-{
- u32 i;
- u64 ofs = start;
-
- if (start >= end) {
- pr_debug("Invalid region: %016llx-%016llx\n", start, end);
- return;
- }
-
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = &numa_meminfo.blk[i];
-
- if (ofs > mb->end)
- continue;
-
- if (end > mb->end) {
- add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
- ofs = mb->end;
- } else {
- add_node_intersection(mb->nid, ofs, end - ofs, type);
- break;
- }
- }
-}
+static unsigned long num_physpages;
-static void __init init_node_memblock(void)
+static void __init info_node_memblock(void)
{
u32 mem_type;
u64 mem_end, mem_start, mem_size;
@@ -293,12 +180,20 @@ static void __init init_node_memblock(void)
case EFI_BOOT_SERVICES_DATA:
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
- add_numamem_region(mem_start, mem_end, mem_type);
+ num_physpages += (mem_size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
break;
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
case EFI_ACPI_RECLAIM_MEMORY:
- add_numamem_region(mem_start, mem_end, mem_type);
+ num_physpages += (mem_size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
fallthrough;
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
@@ -312,24 +207,6 @@ static void __init init_node_memblock(void)
}
}
-static void __init numa_default_distance(void)
-{
- int row, col;
-
- for (row = 0; row < MAX_NUMNODES; row++)
- for (col = 0; col < MAX_NUMNODES; col++) {
- if (col == row)
- node_distances[row][col] = LOCAL_DISTANCE;
- else
- /* We assume that one node per package here!
- *
- * A SLIT should be used for multiple nodes
- * per package to override default setting.
- */
- node_distances[row][col] = REMOTE_DISTANCE;
- }
-}
-
/*
* fake_numa_init() - For Non-ACPI systems
* Return: 0 on success, -errno on failure.
@@ -354,22 +231,16 @@ int __init init_numa_memory(void)
for (i = 0; i < NR_CPUS; i++)
set_cpuid_to_node(i, NUMA_NO_NODE);
- numa_default_distance();
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
- memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-
/* Parse SRAT and SLIT if provided by firmware. */
- ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
+ if (!acpi_disabled)
+ ret = numa_memblks_init(acpi_numa_init, false);
+ else
+ ret = numa_memblks_init(fake_numa_init, false);
+
if (ret < 0)
return ret;
- node_possible_map = numa_nodes_parsed;
- if (WARN_ON(nodes_empty(node_possible_map)))
- return -EINVAL;
-
- init_node_memblock();
+ info_node_memblock();
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
@@ -377,7 +248,8 @@ int __init init_numa_memory(void)
node_mem_init(node);
node_set_online(node);
}
- max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
setup_nr_node_ids();
loongson_sysconf.nr_nodes = nr_node_ids;
@@ -388,32 +260,6 @@ int __init init_numa_memory(void)
#endif
-void __init paging_init(void)
-{
- unsigned int node;
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
- for_each_online_node(node) {
- unsigned long start_pfn, end_pfn;
-
- get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
- if (end_pfn > max_low_pfn)
- max_low_pfn = end_pfn;
- }
-#ifdef CONFIG_ZONE_DMA32
- zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
-#endif
- zones_size[ZONE_NORMAL] = max_low_pfn;
- free_area_init(zones_size);
-}
-
-void __init mem_init(void)
-{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- memblock_free_all();
-}
-
int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 1633ed4f692f..b1b51f920b23 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -1,14 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
+#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>
+static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
static u64 native_steal_clock(int cpu)
{
@@ -17,12 +21,47 @@ static u64 native_steal_clock(int cpu)
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+static bool steal_acc = true;
+
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
+
+static u64 paravt_steal_clock(int cpu)
+{
+ int version;
+ u64 steal;
+ struct kvm_steal_time *src;
+
+ src = &per_cpu(steal_time, cpu);
+ do {
+
+ version = src->version;
+ virt_rmb(); /* Make sure that the version is read before the steal */
+ steal = src->steal;
+ virt_rmb(); /* Make sure that the steal is read before the next version */
+
+ } while ((version & 1) || (version != src->version));
+
+ return steal;
+}
+
#ifdef CONFIG_SMP
+static struct smp_ops native_ops;
+
static void pv_send_ipi_single(int cpu, unsigned int action)
{
int min, old;
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_single(cpu, action);
+ return;
+ }
+
old = atomic_fetch_or(BIT(action), &info->message);
if (old)
return;
@@ -42,6 +81,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
if (cpumask_empty(mask))
return;
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_mask(mask, action);
+ return;
+ }
+
action = BIT(action);
for_each_cpu(i, mask) {
info = &per_cpu(irq_stat, i);
@@ -97,6 +141,16 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
info->ipi_irqs[IPI_CALL_FUNCTION]++;
}
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ info->ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ info->ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
@@ -104,6 +158,8 @@ static void pv_init_ipi(void)
{
int r, swi;
+ /* Init native ipi irq for ACTION_BOOT_CPU */
+ native_ops.init_ipi();
swi = get_percpu_irq(INT_SWI0);
if (swi < 0)
panic("SWI0 IRQ mapping failed\n");
@@ -114,11 +170,14 @@ static void pv_init_ipi(void)
}
#endif
-static bool kvm_para_available(void)
+bool kvm_para_available(void)
{
int config;
static int hypervisor_type;
+ if (!cpu_has_hypervisor)
+ return false;
+
if (!hypervisor_type) {
config = read_cpucfg(CPUCFG_KVM_SIG);
if (!memcmp(&config, KVM_SIGNATURE, 4))
@@ -128,20 +187,26 @@ static bool kvm_para_available(void)
return hypervisor_type == HYPERVISOR_KVM;
}
-int __init pv_ipi_init(void)
+unsigned int kvm_arch_para_features(void)
{
- int feature;
+ static unsigned int feature;
- if (!cpu_has_hypervisor)
- return 0;
if (!kvm_para_available())
return 0;
- feature = read_cpucfg(CPUCFG_KVM_FEATURE);
- if (!(feature & KVM_FEATURE_IPI))
+ if (!feature)
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+
+ return feature;
+}
+
+int __init pv_ipi_init(void)
+{
+ if (!kvm_para_has_feature(KVM_FEATURE_IPI))
return 0;
#ifdef CONFIG_SMP
+ native_ops = mp_ops;
mp_ops.init_ipi = pv_init_ipi;
mp_ops.send_ipi_single = pv_send_ipi_single;
mp_ops.send_ipi_mask = pv_send_ipi_mask;
@@ -149,3 +214,121 @@ int __init pv_ipi_init(void)
return 0;
}
+
+static int pv_enable_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long addr;
+ struct kvm_steal_time *st;
+
+ if (!has_steal_clock)
+ return -EPERM;
+
+ st = &per_cpu(steal_time, cpu);
+ addr = per_cpu_ptr_to_phys(st);
+
+ /* The whole structure kvm_steal_time should be in one page */
+ if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
+ pr_warn("Illegal PV steal time addr %lx\n", addr);
+ return -EFAULT;
+ }
+
+ addr |= KVM_STEAL_PHYS_VALID;
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);
+
+ return 0;
+}
+
+static void pv_disable_steal_time(void)
+{
+ if (has_steal_clock)
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
+}
+
+#ifdef CONFIG_SMP
+static int pv_time_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_enable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_disable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+#endif
+
+static void pv_cpu_reboot(void *unused)
+{
+ pv_disable_steal_time();
+}
+
+static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
+{
+ on_each_cpu(pv_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pv_reboot_nb = {
+ .notifier_call = pv_reboot_notify,
+};
+
+int __init pv_time_init(void)
+{
+ int r;
+
+ if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+ return 0;
+
+ has_steal_clock = 1;
+ r = pv_enable_steal_time();
+ if (r < 0) {
+ has_steal_clock = 0;
+ return 0;
+ }
+ register_reboot_notifier(&pv_reboot_nb);
+
+#ifdef CONFIG_SMP
+ r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "loongarch/pv_time:online",
+ pv_time_cpu_online, pv_time_cpu_down_prepare);
+ if (r < 0) {
+ has_steal_clock = 0;
+ pr_err("Failed to install cpu hotplug callbacks\n");
+ return r;
+ }
+#endif
+
+ static_call_update(pv_steal_clock, paravt_steal_clock);
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+#endif
+
+ pr_info("Using paravirt steal-time\n");
+
+ return 0;
+}
+
+int __init pv_spinlock_init(void)
+{
+ if (!cpu_has_hypervisor)
+ return 0;
+
+ static_branch_enable(&virt_spin_lock_key);
+
+ return 0;
+}
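Note on the paravirt.c changes: ACTION_BOOT_CPU IPIs are routed through the saved native_ops, since the PV send path cannot be used to bring up a CPU (assumed rationale); steal time is published per CPU in a 64-byte-aligned kvm_steal_time that must not cross a page boundary, and is read with a seqcount-style protocol that retries while the version is odd or changed across the read. The matching writer side, schematically (the hypervisor's update, sketched):

        st->version += 1;       /* odd: update in progress */
        smp_wmb();
        st->steal += delta;
        smp_wmb();
        st->version += 1;       /* even again: consistent snapshot */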
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index f86a4b838dd7..9d257c8519c9 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
if (!loongarch_pmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- loongarch_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
static irqreturn_t pmu_handle_irq(int irq, void *dev)
@@ -846,13 +845,14 @@ static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config
static int __init init_hw_perf_events(void)
{
- int counters;
+ int bits, counters;
if (!cpu_has_pmp)
return -ENODEV;
pr_info("Performance counters: ");
- counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
+ bits = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
+ counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
loongarch_pmu.num_counters = counters;
loongarch_pmu.max_period = (1ULL << 63) - 1;
@@ -868,7 +868,7 @@ static int __init init_hw_perf_events(void)
on_each_cpu(reset_counters, NULL, 1);
pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
- loongarch_pmu.name, counters, 64);
+ loongarch_pmu.name, counters, bits);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
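Note on the perf_event.c changes: counter overflow no longer disables the event (throttling is left to the perf core, assumed rationale), and the counter width is probed from CPUCFG6 instead of being reported as a hard-coded 64. Both CPUCFG6 fields encode "value minus one":

        unsigned int cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);

        bits     = ((cfg6 & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1; /* e.g. 63 -> 64-bit counters */
        counters = ((cfg6 & CPUCFG6_PMNUM)  >> CPUCFG6_PMNUM_SHIFT)  + 1; /* e.g. 3  -> 4 counters */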
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 0d33cbc47e51..63d2b7e7e844 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -13,27 +13,13 @@
#include <asm/processor.h>
#include <asm/time.h>
-/*
- * No lock; only written during early bootup by CPU 0.
- */
-static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
-
-int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
-{
- return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
-}
-
-int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
-{
- return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
-}
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned int isa = cpu_data[n].isa_level;
+ unsigned int prid = cpu_data[n].processor_id;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
- struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
@@ -52,6 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+ seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
@@ -64,9 +51,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
- if (cpu_has_loongarch32)
- seq_printf(m, " loongarch32");
- if (cpu_has_loongarch64)
+ if (isa & LOONGARCH_CPU_ISA_LA32R)
+ seq_printf(m, " loongarch32r");
+ if (isa & LOONGARCH_CPU_ISA_LA32S)
+ seq_printf(m, " loongarch32s");
+ if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
@@ -81,26 +70,20 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
+ if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
- seq_printf(m, "Hardware Watchpoint\t: %s",
- cpu_has_watch ? "yes, " : "no\n");
+ seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
- seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+ seq_printf(m, ", iwatch count: %d, dwatch count: %d",
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
- proc_cpuinfo_notifier_args.m = m;
- proc_cpuinfo_notifier_args.n = n;
-
- raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
- &proc_cpuinfo_notifier_args);
-
- seq_printf(m, "\n");
+ seq_printf(m, "\n\n");
return 0;
}
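Note on the proc.c changes: the private cpuinfo notifier chain is removed, the ISA line is derived from the recorded isa_level, a PRID line is added, and the watchpoint line uses str_yes_no(). A stanza now looks roughly like this (values illustrative):

        PRID                    : LA464 (0014c011)
        ISA                     : loongarch32r loongarch32s loongarch64
        Hardware Watchpoint     : yes, iwatch count: 8, dwatch count: 8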
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index f2ff8b5d591e..efd9edf65603 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -34,6 +35,7 @@
#include <linux/nmi.h>
#include <asm/asm.h>
+#include <asm/asm-prototypes.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
@@ -47,6 +49,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
+#include <asm/switch_to.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
@@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
+asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
@@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs)
+{
+ schedule_tail(prev);
+ syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg)
+{
+ schedule_tail(prev);
+ fn(fn_arg);
+ syscall_exit_to_user_mode(regs);
+}
+
/*
* Copy architecture-specific thread state
*/
@@ -146,7 +167,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long childksp;
unsigned long tls = args->tls;
unsigned long usp = args->stack;
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
struct pt_regs *childregs, *regs = current_pt_regs();
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
@@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = childksp;
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
- p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
- p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->regs[3] = usp;
p->thread.reg03 = (unsigned long) childregs;
- p->thread.reg01 = (unsigned long) ret_from_fork;
- p->thread.sched_ra = (unsigned long) ret_from_fork;
+ p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+ p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
/*
* New tasks lose permission to use the fpu. This accelerates context
@@ -293,13 +314,15 @@ unsigned long stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
- /* Space for the VDSO & data page */
- top -= PAGE_ALIGN(current->thread.vdso->size);
- top -= VVAR_SIZE;
+ if (current->thread.vdso) {
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= VVAR_SIZE;
- /* Space to randomize the VDSO base */
- if (current->flags & PF_RANDOMIZE)
- top -= VDSO_RANDOMIZE_SIZE;
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+ }
return top;
}
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 200109de1971..8edd0954e55a 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -589,6 +589,7 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint_ctrl ctrl;
+ struct thread_info *ti = task_thread_info(tsk);
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
@@ -613,8 +614,10 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
if (err)
return err;
attr.disabled = 0;
+ set_ti_thread_flag(ti, TIF_LOAD_WATCH);
} else {
attr.disabled = 1;
+ clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
}
return modify_user_hw_breakpoint(bp, &attr);
@@ -717,7 +720,7 @@ static int hw_break_set(struct task_struct *target,
unsigned int note_type = regset->core_note_type;
/* Resource info */
- offset = offsetof(struct user_watch_state, dbg_regs);
+ offset = offsetof(struct user_watch_state_v2, dbg_regs);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
/* (address, mask, ctrl) registers */
@@ -861,7 +864,7 @@ enum loongarch_regset {
static const struct user_regset loongarch64_regsets[] = {
[REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
.size = sizeof(elf_greg_t),
.align = sizeof(elf_greg_t),
@@ -869,7 +872,7 @@ static const struct user_regset loongarch64_regsets[] = {
.set = gpr_set,
},
[REGSET_FPR] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
@@ -877,7 +880,7 @@ static const struct user_regset loongarch64_regsets[] = {
.set = fpr_set,
},
[REGSET_CPUCFG] = {
- .core_note_type = NT_LOONGARCH_CPUCFG,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_CPUCFG),
.n = 64,
.size = sizeof(u32),
.align = sizeof(u32),
@@ -886,7 +889,7 @@ static const struct user_regset loongarch64_regsets[] = {
},
#ifdef CONFIG_CPU_HAS_LSX
[REGSET_LSX] = {
- .core_note_type = NT_LOONGARCH_LSX,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LSX),
.n = NUM_FPU_REGS,
.size = 16,
.align = 16,
@@ -896,7 +899,7 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_CPU_HAS_LASX
[REGSET_LASX] = {
- .core_note_type = NT_LOONGARCH_LASX,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LASX),
.n = NUM_FPU_REGS,
.size = 32,
.align = 32,
@@ -906,7 +909,7 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_CPU_HAS_LBT
[REGSET_LBT] = {
- .core_note_type = NT_LOONGARCH_LBT,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LBT),
.n = 5,
.size = sizeof(u64),
.align = sizeof(u64),
@@ -916,16 +919,16 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
- .core_note_type = NT_LOONGARCH_HW_BREAK,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ USER_REGSET_NOTE_TYPE(LOONGARCH_HW_BREAK),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
.set = hw_break_set,
},
[REGSET_HW_WATCH] = {
- .core_note_type = NT_LOONGARCH_HW_WATCH,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ USER_REGSET_NOTE_TYPE(LOONGARCH_HW_WATCH),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 1acfa704c8d0..b5e2312a2fca 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
+#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -34,11 +35,27 @@ static inline void __init relocate_relative(void)
if (rela->r_info != R_LARCH_RELATIVE)
continue;
- if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
- relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
-
+ relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
}
+
+#ifdef CONFIG_RELR
+ u64 *addr = NULL;
+ u64 *relr = (u64 *)&__relr_dyn_begin;
+ u64 *relr_end = (u64 *)&__relr_dyn_end;
+
+ for ( ; relr < relr_end; relr++) {
+ if ((*relr & 1) == 0) {
+ addr = (u64 *)(*relr + reloc_offset);
+ *addr++ += reloc_offset;
+ } else {
+ for (u64 *p = addr, r = *relr >> 1; r; p++, r >>= 1)
+ if (r & 1)
+ *p += reloc_offset;
+ addr += 63;
+ }
+ }
+#endif
}
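The CONFIG_RELR loop decodes the packed relocation format emitted by --pack-dyn-relocs=relr: even entries carry an address, odd entries carry a bitmap covering the following 63 words. A self-contained userspace sketch of the same walk, with word indices standing in for the absolute addresses the kernel stores:

#include <stdint.h>
#include <stdio.h>

static void apply_relr(const uint64_t *relr, size_t n,
		       uint64_t *image, uint64_t offset)
{
	uint64_t *addr = NULL;

	for (size_t i = 0; i < n; i++) {
		if ((relr[i] & 1) == 0) {	/* even: address entry */
			addr = image + relr[i];
			*addr++ += offset;
		} else {			/* odd: bitmap over next 63 words */
			uint64_t r = relr[i] >> 1;

			for (uint64_t *p = addr; r; p++, r >>= 1)
				if (r & 1)
					*p += offset;
			addr += 63;
		}
	}
}

int main(void)
{
	uint64_t image[4] = { 100, 200, 300, 400 };
	/* entry 0 relocates image[0]; the bitmap marks the 1st and 3rd
	 * following words, i.e. image[1] and image[3] */
	const uint64_t relr[] = { 0, ((1ULL << 0) | (1ULL << 2)) << 1 | 1 };

	apply_relr(relr, 2, image, 1000);
	for (int i = 0; i < 4; i++)
		printf("image[%d] = %llu\n", i, (unsigned long long)image[i]);
	return 0;
}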
static inline void __init relocate_absolute(long random_offset)
@@ -123,6 +140,36 @@ static inline __init bool kaslr_disabled(void)
if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
return true;
+#ifdef CONFIG_HIBERNATION
+ str = strstr(builtin_cmdline, "nohibernate");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "nohibernate");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "noresume");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "noresume");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "resume=");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "resume=");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+#endif
+
+ str = strstr(boot_command_line, "kexec_file");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
return false;
}
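Each check above uses the same boundary test: a strstr() hit only counts when it starts the command line or follows a space, so a token cannot match inside another parameter's name. Extracted as a userspace helper (note it still matches prefixes such as "noresumexyz", exactly like the open-coded kernel pattern):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool param_at_boundary(const char *cmdline, const char *param)
{
	const char *str = strstr(cmdline, param);

	return str && (str == cmdline || *(str - 1) == ' ');
}

int main(void)
{
	printf("%d\n", param_at_boundary("console=ttyS0 noresume", "noresume")); /* 1 */
	printf("%d\n", param_at_boundary("fake_noresume=1", "noresume"));        /* 0 */
	return 0;
}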
@@ -170,7 +217,7 @@ unsigned long __init relocate_kernel(void)
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -182,6 +229,7 @@ unsigned long __init relocate_kernel(void)
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
index 84e6de2fd973..8b5140ac9ea1 100644
--- a/arch/loongarch/kernel/relocate_kernel.S
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -109,4 +109,4 @@ SYM_CODE_END(kexec_smp_wait)
relocate_new_kernel_end:
.section ".data"
-SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)
+SYM_DATA(relocate_new_kernel_size, .quad relocate_new_kernel_end - relocate_new_kernel)
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
index 1ef8c6383535..de8fa5a8a825 100644
--- a/arch/loongarch/kernel/reset.c
+++ b/arch/loongarch/kernel/reset.c
@@ -33,7 +33,7 @@ void machine_halt(void)
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
@@ -53,7 +53,7 @@ void machine_power_off(void)
#endif
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
@@ -74,6 +74,6 @@ void machine_restart(char *command)
acpi_reboot();
while (true) {
- __arch_cpu_idle();
+ __asm__ __volatile__("idle 0" : : : "memory");
}
}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 3d048f1be143..25a87378e48e 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -55,6 +55,7 @@
#define SMBIOS_FREQHIGH_OFFSET 0x17
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
#define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
@@ -125,7 +126,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
- loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}
@@ -190,6 +191,16 @@ static int __init early_parse_mem(char *p)
return -EINVAL;
}
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@') /* mem=SIZE@START: each of several regions needs '@' */
+ start = memparse(p + 1, &p);
+ else { /* bare mem=SIZE: only one allowed, acts as a memory limit */
+ usermem = 1;
+ memblock_enforce_memory_limit(size);
+ return 0;
+ }
+
/*
* If a user specifies memory size, we
* blow away any automatically generated
@@ -200,14 +211,6 @@ static int __init early_parse_mem(char *p)
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
- start = 0;
- size = memparse(p, &p);
- if (*p == '@')
- start = memparse(p + 1, &p);
- else {
- pr_err("Invalid format!\n");
- return -EINVAL;
- }
if (!IS_ENABLED(CONFIG_NUMA))
memblock_add(start, size);
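The reordered parsing accepts both mem=SIZE@START (an explicit region) and a bare mem=SIZE (a global limit via memblock_enforce_memory_limit()). A userspace sketch of the split, with a simplified memparse() stand-in that only handles uppercase K/M/G suffixes (the kernel helper also takes lowercase):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse_model(const char *p, char **endp)
{
	unsigned long long v = strtoull(p, endp, 0);

	switch (**endp) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10; (*endp)++; break;
	}
	return v;
}

int main(void)
{
	const char *p = "512M@0x200000";
	char *end;
	unsigned long long start = 0, size;

	size = memparse_model(p, &end);
	if (*end == '@')	/* explicit region: size and start */
		start = memparse_model(end + 1, &end);
	else			/* bare mem=SIZE: global memory limit */
		printf("limit to %llu bytes\n", size);

	printf("size=%llu start=%#llx\n", size, start);
	return 0;
}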
@@ -258,18 +261,17 @@ static void __init arch_reserve_crashkernel(void)
int ret;
unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
- char *cmdline = boot_command_line;
bool high = false;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
- &crash_size, &crash_base, &low_size, &high);
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &crash_size, &crash_base, &low_size, NULL, &high);
if (ret)
return;
- reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
static void __init fdt_setup(void)
@@ -290,10 +292,8 @@ static void __init fdt_setup(void)
if (!fdt_pointer || fdt_check_header(fdt_pointer))
return;
- early_init_dt_scan(fdt_pointer);
+ early_init_dt_scan(fdt_pointer, __pa(fdt_pointer));
early_init_fdt_reserve_self();
-
- max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}
@@ -353,6 +353,7 @@ void __init platform_init(void)
#ifdef CONFIG_ACPI
acpi_table_upgrade();
+ acpi_gbl_use_global_lock = false;
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
@@ -386,6 +387,10 @@ static void __init check_kernel_sections_mem(void)
*/
static void __init arch_mem_init(char **cmdline_p)
{
+ /* Recalculate max_low_pfn for "mem=xxx" */
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
+
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
@@ -430,7 +435,7 @@ static void __init resource_init(void)
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
- standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES);
for_each_mem_region(region) {
res = &standard_resources[i++];
@@ -576,8 +581,10 @@ static void __init prefill_possible_map(void)
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
- for (; i < NR_CPUS; i++)
+ for (; i < NR_CPUS; i++) {
+ set_cpu_present(i, false);
set_cpu_possible(i, false);
+ }
set_nr_cpu_ids(possible);
}
@@ -601,6 +608,8 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);
resource_init();
+ jump_label_init(); /* Initialise the static keys for paravirtualization */
+
#ifdef CONFIG_SMP
plat_smp_setup();
prefill_possible_map();
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 7a555b600171..c9f7ca778364 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -51,27 +51,6 @@
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
@@ -698,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx->lbt.addr)
+ err |= protected_save_lbt_context(extctx);
+#endif
+
if (extctx->lasx.addr)
err |= protected_save_lasx_context(extctx);
else if (extctx->lsx.addr)
@@ -705,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
else if (extctx->fpu.addr)
err |= protected_save_fpu_context(extctx);
-#ifdef CONFIG_CPU_HAS_LBT
- if (extctx->lbt.addr)
- err |= protected_save_lbt_context(extctx);
-#endif
-
/* Set the "end" magic */
info = (struct sctx_info *)extctx->end.addr;
err |= __put_user(0, &info->magic);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 1436d2465939..8b2fcb3fb874 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -13,11 +13,13 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq_work.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
@@ -44,6 +46,10 @@ EXPORT_SYMBOL(__cpu_logical_map);
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
+/* Representing the last level cache shared map of each logical CPU */
+cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_llc_shared_map);
+
/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@@ -61,6 +67,9 @@ EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+/* representing cpus for which llc shared maps can be computed */
+static cpumask_t cpu_llc_shared_setup_map;
+
/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
@@ -70,6 +79,8 @@ static DEFINE_PER_CPU(int, cpu_state);
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -79,7 +90,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
}
}
@@ -98,6 +109,34 @@ static inline void set_cpu_core_map(int cpu)
}
}
+static inline void set_cpu_llc_shared_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);
+
+ for_each_cpu(i, &cpu_llc_shared_setup_map) {
+ if (cpu_to_node(cpu) == cpu_to_node(i)) {
+ cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
+ }
+ }
+}
+
+static inline void clear_cpu_llc_shared_map(int cpu)
+{
+ int i;
+
+ for_each_cpu(i, &cpu_llc_shared_setup_map) {
+ if (cpu_to_node(cpu) == cpu_to_node(i)) {
+ cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
+ cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
+ }
+ }
+
+ cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
+}
+
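The pair of helpers above keeps cpu_llc_shared_map symmetric by treating all CPUs on one NUMA node as sharing the last-level cache. A userspace model of the set-side logic, with plain bitmasks in place of cpumask_t and a fixed cpu-to-node table:

#include <stdio.h>

#define NCPU 4
static unsigned int llc_map[NCPU];	/* bit j set: cpu j shares our LLC */
static unsigned int setup_map;		/* cpus already brought up */
static const int node_of[NCPU] = { 0, 0, 1, 1 };

static void set_llc_shared(int cpu)
{
	setup_map |= 1u << cpu;
	for (int i = 0; i < NCPU; i++)
		if ((setup_map & (1u << i)) && node_of[i] == node_of[cpu]) {
			llc_map[cpu] |= 1u << i;
			llc_map[i] |= 1u << cpu;
		}
}

int main(void)
{
	for (int c = 0; c < NCPU; c++)
		set_llc_shared(c);
	for (int c = 0; c < NCPU; c++)
		printf("cpu%d llc mask: 0x%x\n", c, llc_map[c]);
	return 0;
}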
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -217,6 +256,13 @@ void arch_smp_send_reschedule(int cpu)
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
+}
+#endif
+
static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
@@ -234,6 +280,16 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
}
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
@@ -271,11 +327,10 @@ static void __init fdt_smp_setup(void)
if (cpuid >= nr_cpu_ids)
continue;
- if (cpuid == loongson_sysconf.boot_cpu_id) {
+ if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- } else {
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- }
+ else
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
set_cpu_possible(cpu, true);
@@ -283,7 +338,7 @@ static void __init fdt_smp_setup(void)
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
- early_numa_add_cpu(cpu, 0);
+ early_numa_add_cpu(cpuid, 0);
set_cpuid_to_node(cpuid, 0);
}
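With the change above, a non-boot CPU takes the first free bit of the present mask as its logical id while the boot CPU stays pinned to 0. A userspace model, with a linear scan standing in for find_first_zero_bit():

#include <stdio.h>

int main(void)
{
	unsigned int present = 1;	/* logical cpu0 = boot cpu */
	int hwids[] = { 7, 3, 9 };	/* arbitrary physical cpuids */

	for (int i = 0; i < 3; i++) {
		int cpu = 0;

		while (present >> cpu & 1)	/* find_first_zero_bit() */
			cpu++;
		present |= 1u << cpu;
		printf("hwid %d -> logical cpu %d\n", hwids[i], cpu);
	}
	return 0;
}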
@@ -312,11 +367,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
int i = 0;
parse_acpi_topology();
+ cpu_data[0].global_id = cpu_logical_map(0);
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
set_cpu_present(i, true);
csr_mail_send(0, __cpu_logical_map[i], 0);
- cpu_data[i].global_id = __cpu_logical_map[i];
}
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -361,6 +416,7 @@ void loongson_init_secondary(void)
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].global_id = cpu_logical_map(cpu);
}
void loongson_smp_finish(void)
@@ -385,6 +441,7 @@ int loongson_cpu_disable(void)
#endif
set_cpu_online(cpu, false);
clear_cpu_sibling_map(cpu);
+ clear_cpu_llc_shared_map(cpu);
calculate_cpu_foreign_map();
local_irq_save(flags);
irq_migrate_all_off_this_cpu();
@@ -403,7 +460,7 @@ void loongson_cpu_die(unsigned int cpu)
mb();
}
-void __noreturn arch_cpu_idle_dead(void)
+static void __noreturn idle_play_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
@@ -427,6 +484,50 @@ void __noreturn arch_cpu_idle_dead(void)
BUG();
}
+#ifdef CONFIG_HIBERNATION
+static void __noreturn poll_play_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("nop\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ BUG();
+}
+#endif
+
+static void (*play_dead)(void) = idle_play_dead;
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+ play_dead();
+ BUG(); /* play_dead() doesn't return */
+}
+
+#ifdef CONFIG_HIBERNATION
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ int ret;
+
+ play_dead = poll_play_dead;
+ ret = suspend_disable_secondary_cpus();
+ play_dead = idle_play_dead;
+
+ return ret;
+}
+#endif
+
#endif
/*
@@ -434,28 +535,32 @@ void __noreturn arch_cpu_idle_dead(void)
*/
#ifdef CONFIG_PM
-static int loongson_ipi_suspend(void)
+static int loongson_ipi_suspend(void *data)
{
return 0;
}
-static void loongson_ipi_resume(void)
+static void loongson_ipi_resume(void *data)
{
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}
-static struct syscore_ops loongson_ipi_syscore_ops = {
+static const struct syscore_ops loongson_ipi_syscore_ops = {
.resume = loongson_ipi_resume,
.suspend = loongson_ipi_suspend,
};
+static struct syscore loongson_ipi_syscore = {
+ .ops = &loongson_ipi_syscore_ops,
+};
+
/*
* Enable boot cpu ipi before enabling nonboot cpus
* during syscore_resume.
*/
static int __init ipi_pm_init(void)
{
- register_syscore_ops(&loongson_ipi_syscore_ops);
+ register_syscore(&loongson_ipi_syscore);
return 0;
}
@@ -463,7 +568,7 @@ core_initcall(ipi_pm_init);
#endif
/* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
@@ -496,6 +601,8 @@ void smp_prepare_boot_cpu(void)
rr_node = next_node_in(rr_node, node_online_map);
}
}
+
+ pv_spinlock_init();
}
/* called from main before smp_init() */
@@ -505,6 +612,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
loongson_prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
+ set_cpu_llc_shared_map(0);
set_cpu_core_map(0);
calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
@@ -546,6 +654,7 @@ asmlinkage void start_secondary(void)
loongson_init_secondary();
set_cpu_sibling_map(cpu);
+ set_cpu_llc_shared_map(cpu);
set_cpu_core_map(cpu);
notify_cpu_starting(cpu);
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
index 9a038d1070d7..387dc4d3c486 100644
--- a/arch/loongarch/kernel/stacktrace.c
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
+ regs->regs[22] = 0;
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
+ regs->regs[22] = task->thread.reg22;
}
regs->regs[1] = 0;
- regs->regs[22] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 31dd8199b245..9c23cb7e432f 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -12,7 +12,7 @@
/*
* task_struct *__switch_to(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, void *sched_ra, void *sched_cfa)
*/
.align 5
SYM_FUNC_START(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 8801611143ab..168bd97540f8 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,17 +9,21 @@
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/asm.h>
#include <asm/exception.h>
+#include <asm/loongarch.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
@@ -32,13 +36,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long, unsigned long);
-void noinstr do_syscall(struct pt_regs *regs)
+void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
{
unsigned long nr;
sys_call_fn syscall_fn;
@@ -54,11 +58,24 @@ void noinstr do_syscall(struct pt_regs *regs)
nr = syscall_enter_from_user_mode(regs, nr);
+ add_random_kstack_offset();
+
if (nr < NR_syscalls) {
syscall_fn = sys_call_table[nr];
regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
regs->regs[7], regs->regs[8], regs->regs[9]);
}
+ /*
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
+ * alignment, which removes the 4 low bits from any entropy chosen
+ * here.
+ *
+ * The resulting 6 bits of entropy are seen in SP[9:4].
+ */
+ choose_random_kstack_offset(drdtime());
+
syscall_exit_to_user_mode(regs);
}
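The arithmetic the comment describes can be checked directly: mask the timer sample to 10 bits, then drop the 4 bits the 16-byte stack alignment discards. A userspace sketch:

#include <stdio.h>

#define KSTACK_OFFSET_MAX(x) ((x) & 0x3ff)	/* keep 10 bits */

int main(void)
{
	unsigned long long drdtime_sample = 0x123456789abcdefULL;
	unsigned long long off = KSTACK_OFFSET_MAX(drdtime_sample);

	printf("raw 10-bit offset: %#llx\n", off);
	printf("after 16-byte alignment: %#llx (entropy lives in SP[9:4])\n",
	       off & ~0xfULL);
	return 0;
}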
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index fd5354f9be7c..6fb92cc1a4c9 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -5,6 +5,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -15,6 +16,7 @@
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
+#include <asm/paravirt.h>
#include <asm/time.h>
u64 cpu_clock_freq;
@@ -101,7 +103,22 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
return 0;
}
-static unsigned long __init get_loops_per_jiffy(void)
+static int arch_timer_starting(unsigned int cpu)
+{
+ set_csr_ecfg(ECFGF_TIMER);
+
+ return 0;
+}
+
+static int arch_timer_dying(unsigned int cpu)
+{
+ /* Clear Timer Interrupt */
+ write_csr_tintclear(CSR_TINTCLR_TI);
+
+ return 0;
+}
+
+static unsigned long get_loops_per_jiffy(void)
{
unsigned long lpj = (unsigned long)const_clock_freq;
@@ -110,7 +127,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
-static long init_offset __nosavedata;
+static long init_offset;
void save_counter(void)
{
@@ -126,8 +143,12 @@ void sync_counter(void)
int constant_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
- unsigned long min_delta = 0x600;
- unsigned long max_delta = (1UL << 48) - 1;
+#ifdef CONFIG_PREEMPT_RT
+ unsigned long min_delta = 100;
+#else
+ unsigned long min_delta = 1000;
+#endif
+ unsigned long max_delta = GENMASK_ULL(boot_cpu_data.timerbits, 0);
struct clock_event_device *cd;
static int irq = 0, timer_irq_installed = 0;
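GENMASK_ULL(h, l) produces a mask with bits h..l set, so the new max_delta matches the old (1UL << 48) - 1 constant whenever the timer is 48 bits wide. A userspace check, assuming boot_cpu_data.timerbits holds the timer width minus one:

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	int timerbits = 47;	/* assumed: width minus one for a 48-bit timer */

	printf("%d\n", GENMASK_ULL(timerbits, 0) == (1ULL << 48) - 1); /* prints 1 */
	return 0;
}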
@@ -167,6 +188,10 @@ int constant_clockevent_init(void)
lpj_fine = get_loops_per_jiffy();
pr_info("Constant clock event device register\n");
+ cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
+ "clockevents/loongarch/timer:starting",
+ arch_timer_starting, arch_timer_dying);
+
return 0;
}
@@ -214,4 +239,5 @@ void __init time_init(void)
constant_clockevent_init();
constant_clocksource_init();
+ pv_time_init();
}
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index f9f4eb00c92e..da5926fead4a 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
+#include <linux/export.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
@@ -553,8 +554,12 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
+ bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
+ if (!pie)
+ local_irq_enable();
+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
@@ -579,6 +584,8 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
+ if (!pie)
+ local_irq_disable();
#endif
irqentry_exit(regs, state);
}
@@ -592,29 +599,37 @@ int is_valid_bugaddr(unsigned long addr)
static void bug_handler(struct pt_regs *regs)
{
+ if (user_mode(regs)) {
+ force_sig(SIGTRAP);
+ return;
+ }
+
switch (report_bug(regs->csr_era, regs)) {
case BUG_TRAP_TYPE_BUG:
- case BUG_TRAP_TYPE_NONE:
- die_if_kernel("Oops - BUG", regs);
- force_sig(SIGTRAP);
+ die("Oops - BUG", regs);
break;
case BUG_TRAP_TYPE_WARN:
/* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE;
break;
+
+ default:
+ if (!fixup_exception(regs))
+ die("Oops - BUG", regs);
}
}
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -680,7 +695,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -698,11 +713,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -768,7 +784,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1003,6 +1019,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
+ bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -1012,7 +1029,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1026,7 +1043,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1114,8 +1131,8 @@ static void configure_exception_vector(void)
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
- csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
- csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+ csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
+ csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
}
void per_cpu_trap_init(int cpu)
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
index 3abf163dda05..487be604b96a 100644
--- a/arch/loongarch/kernel/unaligned.c
+++ b/arch/loongarch/kernel/unaligned.c
@@ -482,14 +482,10 @@ sigbus:
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
- struct dentry *d;
-
- d = debugfs_create_dir("loongarch", NULL);
-
debugfs_create_u32("unaligned_instructions_user",
- S_IRUGO, d, &unaligned_instructions_user);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_user);
debugfs_create_u32("unaligned_instructions_kernel",
- S_IRUGO, d, &unaligned_instructions_kernel);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_kernel);
return 0;
}
diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
index 98379b7d4147..08d7951b2f60 100644
--- a/arch/loongarch/kernel/unwind_guess.c
+++ b/arch/loongarch/kernel/unwind_guess.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/unwind.h>
+#include <linux/export.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
index b25722876331..0d5fa64a2225 100644
--- a/arch/loongarch/kernel/unwind_orc.c
+++ b/arch/loongarch/kernel/unwind_orc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/objtool.h>
+#include <linux/export.h>
#include <linux/module.h>
+#include <linux/objtool.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>
@@ -399,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
/* Don't let modules unload while we're reading their ORC data. */
- preempt_disable();
+ guard(rcu)();
if (is_entry_func(state->pc))
goto end;
@@ -507,21 +508,19 @@ bool unwind_next_frame(struct unwind_state *state)
state->pc = bt_address(pc);
if (!state->pc) {
- pr_err("cannot find unwind pc at %pK\n", (void *)pc);
+ pr_err("cannot find unwind pc at %p\n", (void *)pc);
goto err;
}
if (!__kernel_text_address(state->pc))
goto err;
- preempt_enable();
return true;
err:
state->error = true;
end:
- preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
index 929ae240280a..729e775bd40d 100644
--- a/arch/loongarch/kernel/unwind_prologue.c
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
index 87abc7137b73..6022eb0f71db 100644
--- a/arch/loongarch/kernel/uprobes.c
+++ b/arch/loongarch/kernel/uprobes.c
@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
- user_enable_single_step(current);
return 0;
}
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
-
- if (auprobe->simulate)
- instruction_pointer_set(regs, auprobe->resume_era);
- else
- instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
-
- user_disable_single_step(current);
+ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
return 0;
}
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
- user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
- auprobe->resume_era = regs->csr_era;
return true;
}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index 90dfccb41c14..dee1a15d7f4c 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -14,8 +14,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/time_namespace.h>
-#include <linux/timekeeper_internal.h>
+#include <linux/vdso_datastore.h>
#include <asm/page.h>
#include <asm/vdso.h>
@@ -26,18 +25,6 @@
extern char vdso_start[], vdso_end[];
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store generic_vdso_data __page_aligned_data;
-
-static union {
- u8 page[LOONGARCH_VDSO_DATA_SIZE];
- struct loongarch_vdso_data vdata;
-} loongarch_vdso_data __page_aligned_data;
-
-static struct page *vdso_pages[] = { NULL };
-struct vdso_data *vdso_data = generic_vdso_data.data;
-struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
-
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
current->mm->context.vdso = (void *)(new_vma->vm_start);
@@ -45,55 +32,12 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
return 0;
}
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- unsigned long pfn;
- struct page *timens_page = find_timens_vvar_page(vma);
-
- switch (vmf->pgoff) {
- case VVAR_GENERIC_PAGE_OFFSET:
- if (!timens_page)
- pfn = sym_to_pfn(vdso_data);
- else
- pfn = page_to_pfn(timens_page);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace specific
- * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
- * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- else
- pfn = sym_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
- pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
- break;
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
struct loongarch_vdso_info vdso_info = {
.vdso = vdso_start,
- .size = PAGE_SIZE,
.code_mapping = {
.name = "[vdso]",
- .pages = vdso_pages,
.mremap = vdso_mremap,
},
- .data_mapping = {
- .name = "[vvar]",
- .fault = vvar_fault,
- },
.offset_sigreturn = vdso_offset_sigreturn,
};
@@ -102,10 +46,16 @@ static int __init init_vdso(void)
unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
- BUG_ON(!PAGE_ALIGNED(vdso_info.size));
for_each_possible_cpu(cpu)
- vdso_pdata[cpu].node = cpu_to_node(cpu);
+ vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
+
+ vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
+ vdso_info.code_mapping.pages =
+ kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
+
+ if (!vdso_info.code_mapping.pages)
+ return -ENOMEM;
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
@@ -115,37 +65,6 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a
- * task changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
static unsigned long vdso_base(void)
{
unsigned long base = STACK_TOP;
@@ -181,9 +100,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto out;
}
- vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
- VM_READ | VM_MAYREAD | VM_PFNMAP,
- &info->data_mapping);
+ vma = vdso_install_vvar_mapping(mm, data_addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
@@ -191,7 +108,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_addr = data_addr + VVAR_SIZE;
vma = _install_special_mapping(mm, vdso_addr, info->size,
- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&info->code_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 3c7595342730..08ea921cdec1 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -113,6 +113,14 @@ SECTIONS
__rela_dyn_end = .;
}
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ __relr_dyn_begin = .;
+ *(.relr.dyn)
+ __relr_dyn_end = .;
+ }
+#endif
+
.data.rel : { *(.data.rel*) }
#ifdef CONFIG_RELOCATABLE
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index c4ef2b4d9797..ed4f724db774 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -21,14 +21,18 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on AS_HAS_LVZ_EXTENSION
select HAVE_KVM_DIRTY_RING_ACQ_REL
- select HAVE_KVM_VCPU_ASYNC_IOCTL
+ select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_MSI
+ select HAVE_KVM_READONLY_MEM
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMIO
- select HAVE_KVM_READONLY_MEM
- select KVM_XFER_TO_GUEST_WORK
+ select VIRT_XFER_TO_GUEST_WORK
+ select SCHED_INFO
+ select GUEST_PERF_EVENTS if PERF_EVENTS
help
Support hosting virtualized guest machines using
hardware virtualization extensions. You will need
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index b2f4cbe01ae8..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -3,8 +3,6 @@
# Makefile for LoongArch KVM support
#
-ccflags-y += -I $(src)
-
include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
@@ -18,5 +16,9 @@ kvm-y += timer.o
kvm-y += tlb.o
kvm-y += vcpu.o
kvm-y += vm.o
+kvm-y += intc/ipi.o
+kvm-y += intc/eiointc.o
+kvm-y += intc/pch_pic.o
+kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index a68573e091c0..cb493980d874 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -24,7 +24,7 @@
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
int rd, rj;
- unsigned int index;
+ unsigned int index, ret;
if (inst.reg2_format.opcode != cpucfg_op)
return EMULATE_FAIL;
@@ -50,7 +50,8 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
break;
case CPUCFG_KVM_FEATURE:
- vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
+ ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+ vcpu->arch.gprs[rd] = ret;
break;
default:
vcpu->arch.gprs[rd] = 0;
@@ -124,6 +125,14 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
+ if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
+ if (kvm_guest_has_pmu(&vcpu->arch)) {
+ vcpu->arch.pc -= 4;
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ return EMULATE_DONE;
+ }
+ }
+
/* Process CSR ops */
switch (rj) {
case 0: /* process csrrd */
@@ -147,8 +156,8 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- int ret;
- unsigned long val;
+ int idx, ret;
+ unsigned long *val;
u32 addr, rd, rj, opcode;
/*
@@ -158,9 +167,9 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
rj = inst.reg2_format.rj;
opcode = inst.reg2_format.opcode;
addr = vcpu->arch.gprs[rj];
- ret = EMULATE_DO_IOCSR;
run->iocsr_io.phys_addr = addr;
run->iocsr_io.is_write = 0;
+ val = &vcpu->arch.gprs[rd];
/* LoongArch is Little endian */
switch (opcode) {
@@ -193,16 +202,33 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
run->iocsr_io.is_write = 1;
break;
default:
- ret = EMULATE_FAIL;
- break;
+ return EMULATE_FAIL;
}
- if (ret == EMULATE_DO_IOCSR) {
- if (run->iocsr_io.is_write) {
- val = vcpu->arch.gprs[rd];
- memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
+ if (run->iocsr_io.is_write) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0)
+ ret = EMULATE_DONE;
+ else {
+ ret = EMULATE_DO_IOCSR;
+ /* Save the data and let user space write it */
+ memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
}
- vcpu->arch.io_gpr = rd;
+ trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
+ } else {
+ vcpu->arch.io_gpr = rd; /* Set register id for iocsr read completion */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr,
+ run->iocsr_io.len, run->iocsr_io.data);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (ret == 0) {
+ kvm_complete_iocsr_read(vcpu, run);
+ ret = EMULATE_DONE;
+ } else
+ ret = EMULATE_DO_IOCSR;
+ trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
}
return ret;
@@ -263,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
+ trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
+ trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
er = kvm_handle_csr(vcpu, inst);
break;
case 0x6: /* Cache, Idle and IOCSR GSPR */
@@ -315,7 +343,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
* 2) Execute CACOP/IDLE instructions;
* 3) Access to unimplemented CSRs/IOCSRs.
*/
-static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
+static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
int ret = RESUME_GUEST;
enum emulation_result er = EMULATE_DONE;
@@ -340,7 +368,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int op8, opcode, rd;
struct kvm_run *run = vcpu->run;
@@ -438,19 +466,36 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
- /* Set for kvm_complete_mmio_read() use */
- vcpu->arch.io_gpr = rd;
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);
+
+ vcpu->arch.io_gpr = rd; /* Set for kvm_complete_mmio_read() use */
+
+ /*
+ * If an mmio device such as the PCH-PIC is emulated in KVM,
+ * there is no need to return to user space to handle the
+ * mmio exception.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
+ run->mmio.len, run->mmio.data);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (!ret) {
+ kvm_complete_mmio_read(vcpu, run);
+ update_pc(&vcpu->arch);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
- run->mmio.phys_addr, NULL);
- } else {
- kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
- inst.word, vcpu->arch.pc, vcpu->arch.badv);
- kvm_arch_vcpu_dump_regs(vcpu);
- vcpu->mmio_needed = 0;
+ return EMULATE_DO_MMIO;
}
+ kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->mmio_needed = 0;
+
return ret;
}
@@ -498,7 +543,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
- int ret;
+ int idx, ret;
unsigned int rd, op8, opcode;
unsigned long curr_pc, rd_val = 0;
struct kvm_run *run = vcpu->run;
@@ -591,23 +636,35 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);
+
+ /*
+ * If an mmio device such as the PCH-PIC is emulated in KVM,
+ * there is no need to return to user space to handle the
+ * mmio exception.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (!ret)
+ return EMULATE_DONE;
+
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
- run->mmio.phys_addr, data);
- } else {
- vcpu->arch.pc = curr_pc;
- kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
- inst.word, vcpu->arch.pc, vcpu->arch.badv);
- kvm_arch_vcpu_dump_regs(vcpu);
- /* Rollback PC if emulation was unsuccessful */
+ return EMULATE_DO_MMIO;
}
+ /* Rollback PC if emulation was unsuccessful */
+ vcpu->arch.pc = curr_pc;
+ kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+
return ret;
}
-static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
int ret;
larch_inst inst;
@@ -615,7 +672,13 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
struct kvm_run *run = vcpu->run;
unsigned long badv = vcpu->arch.badv;
- ret = kvm_handle_mm_fault(vcpu, badv, write);
+ /* Inject ADE exception if exceed max GPA size */
+ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+ return RESUME_GUEST;
+ }
+
+ ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
if (ret) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
@@ -645,24 +708,33 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return ret;
}
-static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, false);
+ return kvm_handle_rdwr_fault(vcpu, false, ecode);
}
-static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, true);
+ return kvm_handle_rdwr_fault(vcpu, true, ecode);
+}
+
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ update_pc(&vcpu->arch);
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);
+
+ return 0;
}
/**
* kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
-static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
struct kvm_run *run = vcpu->run;
@@ -687,14 +759,38 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static long kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+ unsigned long id, data;
+
+ id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ switch (id) {
+ case BIT(KVM_FEATURE_STEAL_TIME):
+ if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return KVM_HCALL_INVALID_PARAMETER;
+
+ vcpu->arch.st.guest_addr = data;
+ if (!(data & KVM_STEAL_PHYS_VALID))
+ return 0;
+
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ return 0;
+ default:
+ return KVM_HCALL_INVALID_CODE;
+ }
+}
+
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
-static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -705,11 +801,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
-static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -717,32 +814,33 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int min, cpu, i;
- unsigned long ipi_bitmap;
+ if (kvm_own_lbt(vcpu))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+ return RESUME_GUEST;
+}
+
+static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+{
+ unsigned int min, cpu;
struct kvm_vcpu *dest;
+ DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
+ };
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
- for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
- ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
- if (!ipi_bitmap)
+ for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ if (!dest)
continue;
- cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
- while (cpu < BITS_PER_LONG) {
- dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
- cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
- if (!dest)
- continue;
-
- /* Send SWI0 to dest vcpu to emulate IPI interrupt */
- kvm_queue_irq(dest, INT_SWI0);
- kvm_vcpu_kick(dest);
- }
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
}
-
- return 0;
}
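The rewrite above treats the a1/a2 registers as one 128-bit bitmap and kicks the vcpu for every set bit, offset by min. A userspace model of the walk, with nested loops standing in for for_each_set_bit():

#include <stdio.h>

int main(void)
{
	unsigned long long ipi_bitmap[2] = { 0x5, 0x1 };	/* bits 0, 2 and 64 */
	unsigned int min = 8;	/* lowest vcpu id covered by the bitmap */

	for (int word = 0; word < 2; word++)
		for (int bit = 0; bit < 64; bit++)
			if (ipi_bitmap[word] >> bit & 1)
				printf("kick vcpu %u\n", min + word * 64 + bit);
	return 0;
}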
/*
@@ -750,23 +848,28 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
*/
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
+ long ret = KVM_HCALL_INVALID_CODE;
unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
- long ret;
switch (func) {
case KVM_HCALL_FUNC_IPI:
- kvm_send_pv_ipi(vcpu);
- ret = KVM_HCALL_SUCCESS;
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
+ kvm_send_pv_ipi(vcpu);
+ ret = KVM_HCALL_SUCCESS;
+ }
+ break;
+ case KVM_HCALL_FUNC_NOTIFY:
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
+ ret = kvm_save_notify(vcpu);
break;
default:
- ret = KVM_HCALL_INVALID_CODE;
break;
}
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
-static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
int ret;
larch_inst inst;
@@ -781,6 +884,28 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
vcpu->stat.hypercall_exits++;
kvm_handle_service(vcpu);
break;
+ case KVM_HCALL_USER_SERVICE:
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ vcpu->stat.hypercall_exits++;
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
+ vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+ vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
+ vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
+ vcpu->run->hypercall.flags = 0;
+ /*
+ * Set an invalid return value by default and let the user-mode VMM modify it.
+ */
+ vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
+ ret = RESUME_HOST;
+ break;
case KVM_HCALL_SWDBG:
/* KVM_HCALL_SWDBG only in effective when SW_BP is enabled */
if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
@@ -804,16 +929,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
-static int kvm_fault_ni(struct kvm_vcpu *vcpu)
+static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int ecode, inst;
- unsigned long estat, badv;
+ unsigned int inst;
+ unsigned long badv;
/* Fetch the instruction */
inst = vcpu->arch.badi;
badv = vcpu->arch.badv;
- estat = vcpu->arch.host_estat;
- ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
@@ -831,11 +954,12 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
+ [EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
[EXCCODE_HVC] = kvm_handle_hypercall,
};
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
- return kvm_fault_tables[fault](vcpu);
+ return kvm_fault_tables[fault](vcpu, fault);
}
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
new file mode 100644
index 000000000000..29886876143f
--- /dev/null
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/count_zeros.h>
+
+static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
+{
+ int ipnum, cpu, cpuid, irq;
+ struct kvm_vcpu *vcpu;
+
+ for (irq = 0; irq < EIOINTC_IRQS; irq++) {
+ ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
+ if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+ ipnum = count_trailing_zeros(ipnum);
+ ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+ }
+
+ cpuid = ((u8 *)s->coremap)[irq];
+ vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
+ if (!vcpu)
+ continue;
+
+ cpu = vcpu->vcpu_id;
+ if (test_bit(irq, (unsigned long *)s->coreisr[cpu]))
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ else
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ }
+}
+
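+/*
+ * ipmap packs one routing byte per group of 32 irqs, so the byte for irq N
+ * is (ipmap >> (N / 32 * 8)) & 0xff: irq 37, for example, lies in group 1
+ * and takes byte 1 of ipmap. With interrupt encoding disabled, the byte is
+ * treated as a bitmap and count_trailing_zeros() picks the lowest set ip
+ * line, clamped to the valid range 0-3.
+ */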
+static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
+{
+ int ipnum, cpu, found;
+ struct kvm_vcpu *vcpu;
+ struct kvm_interrupt vcpu_irq;
+
+ ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
+ if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+ ipnum = count_trailing_zeros(ipnum);
+ ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+ }
+
+ cpu = s->sw_coremap[irq];
+ vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return;
+ }
+
+ if (level) {
+ /* nothing to do if the irq is not enabled */
+ if (!test_bit(irq, (unsigned long *)s->enable))
+ return;
+ __set_bit(irq, (unsigned long *)s->coreisr[cpu]);
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ } else {
+ __clear_bit(irq, (unsigned long *)s->coreisr[cpu]);
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+ }
+
+ if (found < EIOINTC_IRQS)
+ return; /* another irq on this ip line is still pending; no need to update the parent irq */
+
+ vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
+ kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
+}
+
+static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
+ int irq, u64 val, u32 len, bool notify)
+{
+ int i, cpu, cpuid;
+ struct kvm_vcpu *vcpu;
+
+ for (i = 0; i < len; i++) {
+ cpuid = val & 0xff;
+ val = val >> 8;
+
+ if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
+ cpuid = ffs(cpuid) - 1;
+ cpuid = (cpuid >= 4) ? 0 : cpuid;
+ }
+
+ vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
+ if (!vcpu)
+ continue;
+
+ cpu = vcpu->vcpu_id;
+ if (s->sw_coremap[irq + i] == cpu)
+ continue;
+
+ if (notify && test_bit(irq + i, (unsigned long *)s->isr)) {
+ /* lower irq at old cpu and raise irq at new cpu */
+ eiointc_update_irq(s, irq + i, 0);
+ s->sw_coremap[irq + i] = cpu;
+ eiointc_update_irq(s, irq + i, 1);
+ } else {
+ s->sw_coremap[irq + i] = cpu;
+ }
+ }
+}
+
+void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
+{
+ unsigned long flags;
+ unsigned long *isr = (unsigned long *)s->isr;
+
+ spin_lock_irqsave(&s->lock, flags);
+ level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
+ eiointc_update_irq(s, irq, level);
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, unsigned long *val)
+{
+ int index, ret = 0;
+ u64 data = 0;
+ gpa_t offset;
+
+ offset = addr - EIOINTC_BASE;
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 3;
+ data = s->nodetype[index];
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ index = (offset - EIOINTC_IPMAP_START) >> 3;
+ data = s->ipmap;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 3;
+ data = s->enable[index];
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ index = (offset - EIOINTC_BOUNCE_START) >> 3;
+ data = s->bounce[index];
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 3;
+ data = s->coreisr[vcpu->vcpu_id][index];
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = (offset - EIOINTC_COREMAP_START) >> 3;
+ data = s->coremap[index];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *val = data;
+
+ return ret;
+}
+
+static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ int ret = -EINVAL;
+ unsigned long flags, data, offset;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ offset = addr & 0x7;
+ addr -= offset;
+ vcpu->stat.eiointc_read_exits++;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+ if (ret)
+ return ret;
+
+ data = data >> (offset * 8);
+ switch (len) {
+ case 1:
+ *(long *)val = (s8)data;
+ break;
+ case 2:
+ *(long *)val = (s16)data;
+ break;
+ case 4:
+ *(long *)val = (s32)data;
+ break;
+ default:
+ *(long *)val = (long)data;
+ break;
+ }
+
+ return 0;
+}
+
+static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
+ struct loongarch_eiointc *s,
+ gpa_t addr, u64 value, u64 field_mask)
+{
+ int index, irq, ret = 0;
+ u8 cpu;
+ u64 data, old, mask;
+ gpa_t offset;
+
+ offset = addr & 7;
+ mask = field_mask << (offset * 8);
+ data = (value & field_mask) << (offset * 8);
+
+ addr -= offset;
+ offset = addr - EIOINTC_BASE;
+
+ switch (offset) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ index = (offset - EIOINTC_NODETYPE_START) >> 3;
+ old = s->nodetype[index];
+ s->nodetype[index] = (old & ~mask) | data;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ /*
+ * ipmap cannot be changed at runtime; it is programmed only while
+ * the irqchip driver initializes, so there is no need to update
+ * the upper irq level here
+ */
+ old = s->ipmap;
+ s->ipmap = (old & ~mask) | data;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ index = (offset - EIOINTC_ENABLE_START) >> 3;
+ old = s->enable[index];
+ s->enable[index] = (old & ~mask) | data;
+ /*
+ * Bits changing 0 -> 1 enable an irq;
+ * deliver it now if its isr bit is already set.
+ */
+ data = s->enable[index] & ~old & s->isr[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 1);
+ data &= ~BIT_ULL(irq);
+ }
+ /*
+ * Bits changing 1 -> 0 disable an irq;
+ * lower it now if its isr bit is still set.
+ */
+ data = ~s->enable[index] & old & s->isr[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
+ }
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ /* do not emulate hw bounced irq routing */
+ index = (offset - EIOINTC_BOUNCE_START) >> 3;
+ old = s->bounce[index];
+ s->bounce[index] = (old & ~mask) | data;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ index = (offset - EIOINTC_COREISR_START) >> 3;
+ /* the cpu index is that of the accessing vcpu */
+ cpu = vcpu->vcpu_id;
+ old = s->coreisr[cpu][index];
+ /* write 1 to clear interrupt */
+ s->coreisr[cpu][index] = old & ~data;
+ data &= old;
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
+ }
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ index = (offset - EIOINTC_COREMAP_START) >> 3;
+ old = s->coremap[index];
+ s->coremap[index] = (old & ~mask) | data;
+ data = s->coremap[index];
+ eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret = -EINVAL;
+ unsigned long flags, value;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ vcpu->stat.eiointc_write_exits++;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (len) {
+ case 1:
+ value = *(unsigned char *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
+ break;
+ case 2:
+ value = *(unsigned short *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
+ break;
+ case 4:
+ value = *(unsigned int *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
+ break;
+ default:
+ value = *(unsigned long *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
+ break;
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_eiointc_ops = {
+ .read = kvm_eiointc_read,
+ .write = kvm_eiointc_write,
+};
+
+static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ u32 *data = val;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ addr -= EIOINTC_VIRT_BASE;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (addr) {
+ case EIOINTC_VIRT_FEATURES:
+ *data = eiointc->features;
+ break;
+ case EIOINTC_VIRT_CONFIG:
+ *data = eiointc->status;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return 0;
+}
+
+static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 value = *(u32 *)val;
+ struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
+
+ if (!eiointc) {
+ kvm_err("%s: eiointc irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ addr -= EIOINTC_VIRT_BASE;
+ spin_lock_irqsave(&eiointc->lock, flags);
+ switch (addr) {
+ case EIOINTC_VIRT_FEATURES:
+ ret = -EPERM;
+ break;
+ case EIOINTC_VIRT_CONFIG:
+ /*
+ * eiointc features can only be set while the controller is disabled
+ */
+ if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
+ ret = -EPERM;
+ break;
+ }
+ eiointc->status = value & eiointc->features;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
+ .read = kvm_eiointc_virt_read,
+ .write = kvm_eiointc_virt_write,
+};
+
+static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned long type = (unsigned long)attr->attr;
+ u32 i, start_irq, val;
+ void __user *data;
+ struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
+
+ data = (void __user *)attr->addr;
+ switch (type) {
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+ if (copy_from_user(&val, data, 4))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&s->lock, flags);
+ switch (type) {
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+ if (val > EIOINTC_ROUTE_MAX_VCPUS)
+ ret = -EINVAL;
+ else
+ s->num_cpu = val;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+ s->features = val;
+ if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
+ s->status |= BIT(EIOINTC_ENABLE);
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
+ eiointc_set_sw_coreisr(s);
+ for (i = 0; i < (EIOINTC_IRQS / 8); i++) {
+ start_irq = i * 8;
+ eiointc_update_sw_coremap(s, start_irq,
+ s->coremap[i], sizeof(u64), false);
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write, int *data)
+{
+ int addr, cpu, offset, ret = 0;
+ unsigned long flags;
+ void *p = NULL;
+ struct loongarch_eiointc *s;
+
+ s = dev->kvm->arch.eiointc;
+ addr = attr->attr;
+ cpu = addr >> 16;
+ addr &= 0xffff;
+ switch (addr) {
+ case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
+ offset = (addr - EIOINTC_NODETYPE_START) / 4;
+ p = s->nodetype + offset * 4;
+ break;
+ case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
+ offset = (addr - EIOINTC_IPMAP_START) / 4;
+ p = &s->ipmap + offset * 4;
+ break;
+ case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
+ offset = (addr - EIOINTC_ENABLE_START) / 4;
+ p = s->enable + offset * 4;
+ break;
+ case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
+ offset = (addr - EIOINTC_BOUNCE_START) / 4;
+ p = s->bounce + offset * 4;
+ break;
+ case EIOINTC_ISR_START ... EIOINTC_ISR_END:
+ offset = (addr - EIOINTC_ISR_START) / 4;
+ p = s->isr + offset * 4;
+ break;
+ case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ if (cpu >= s->num_cpu)
+ return -EINVAL;
+
+ offset = (addr - EIOINTC_COREISR_START) / 4;
+ p = s->coreisr[cpu] + offset * 4;
+ break;
+ case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
+ offset = (addr - EIOINTC_COREMAP_START) / 4;
+ p = s->coremap + offset * 4;
+ break;
+ default:
+ kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (is_write)
+ memcpy(p, data, 4);
+ else
+ memcpy(data, p, 4);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write, int *data)
+{
+ int addr, ret = 0;
+ unsigned long flags;
+ void *p = NULL;
+ struct loongarch_eiointc *s;
+
+ s = dev->kvm->arch.eiointc;
+ addr = attr->attr;
+ addr &= 0xffff;
+
+ switch (addr) {
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
+ if (is_write)
+ return ret;
+
+ p = &s->num_cpu;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
+ if (is_write)
+ return ret;
+
+ p = &s->features;
+ break;
+ case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
+ p = &s->status;
+ break;
+ default:
+ kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&s->lock, flags);
+ if (is_write)
+ memcpy(p, data, 4);
+ else
+ memcpy(data, p, 4);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return ret;
+}
+
+static int kvm_eiointc_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret, data;
+
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
+ ret = kvm_eiointc_regs_access(dev, attr, false, &data);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)attr->addr, &data, 4))
+ ret = -EFAULT;
+
+ return ret;
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+ ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)attr->addr, &data, 4))
+ ret = -EFAULT;
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_eiointc_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int data;
+
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
+ return kvm_eiointc_ctrl_access(dev, attr);
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
+ if (copy_from_user(&data, (void __user *)attr->addr, 4))
+ return -EFAULT;
+
+ return kvm_eiointc_regs_access(dev, attr, true, &data);
+ case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+ if (copy_from_user(&data, (void __user *)attr->addr, 4))
+ return -EFAULT;
+
+ return kvm_eiointc_sw_status_access(dev, attr, true, &data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ struct loongarch_eiointc *s;
+ struct kvm_io_device *device;
+ struct kvm *kvm = dev->kvm;
+
+ /* eiointc has been created */
+ if (kvm->arch.eiointc)
+ return -EINVAL;
+
+ s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+
+ /*
+ * Initialize IOCSR device
+ */
+ device = &s->device;
+ kvm_iodevice_init(device, &kvm_eiointc_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
+ EIOINTC_BASE, EIOINTC_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret < 0) {
+ kfree(s);
+ return ret;
+ }
+
+ device = &s->device_vext;
+ kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
+ EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
+ if (ret < 0) {
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
+ kfree(s);
+ return ret;
+ }
+ kvm->arch.eiointc = s;
+
+ return 0;
+}
+
+static void kvm_eiointc_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_eiointc *eiointc;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
+ return;
+
+ kvm = dev->kvm;
+ eiointc = kvm->arch.eiointc;
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
+ kfree(eiointc);
+}
+
+static struct kvm_device_ops kvm_eiointc_dev_ops = {
+ .name = "kvm-loongarch-eiointc",
+ .create = kvm_eiointc_create,
+ .destroy = kvm_eiointc_destroy,
+ .set_attr = kvm_eiointc_set_attr,
+ .get_attr = kvm_eiointc_get_attr,
+};
+
+int kvm_loongarch_register_eiointc_device(void)
+{
+ return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
+}
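
Taken together with the attr handlers above, the userspace bring-up order is:
create the device, program the vcpu count and features, then signal
LOAD_FINISHED after restore. A hedged sketch of the first two steps, assuming
the UAPI constants from this patch are visible to userspace and with error
handling elided:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int create_eiointc(int vm_fd, unsigned int nr_vcpus)
    {
            struct kvm_create_device cd = {
                    .type = KVM_DEV_TYPE_LOONGARCH_EIOINTC,
            };
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
                    .attr  = KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU,
                    .addr  = (unsigned long)&nr_vcpus, /* 4 bytes copied in */
            };

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                    return -1;
            ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);

            return cd.fd;
    }
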
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
new file mode 100644
index 000000000000..05cefd29282e
--- /dev/null
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_ipi.h>
+#include <asm/kvm_vcpu.h>
+
+static void ipi_set(struct kvm_vcpu *vcpu, uint32_t data)
+{
+ uint32_t status;
+ struct kvm_interrupt irq;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ status = vcpu->arch.ipi_state.status;
+ vcpu->arch.ipi_state.status |= data;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ if ((status == 0) && data) {
+ irq.irq = LARCH_INT_IPI;
+ kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+}
+
+static void ipi_send(struct kvm *kvm, uint64_t data)
+{
+ int cpu;
+ struct kvm_vcpu *vcpu;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return;
+ }
+
+ ipi_set(vcpu, BIT(data & 0x1f));
+}
+
+static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
+{
+ uint32_t status;
+ struct kvm_interrupt irq;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ vcpu->arch.ipi_state.status &= ~data;
+ status = vcpu->arch.ipi_state.status;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ if (status == 0) {
+ irq.irq = -LARCH_INT_IPI;
+ kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+}
+
+static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
+{
+ uint64_t data = 0;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return data & 0xffff;
+ case 4:
+ return data & 0xffffffff;
+ case 8:
+ return data;
+ default:
+ kvm_err("%s: unknown data len: %d\n", __func__, len);
+ return 0;
+ }
+}
+
+static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
+{
+ void *pbuf;
+
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);
+
+ switch (len) {
+ case 1:
+ *(unsigned char *)pbuf = (unsigned char)data;
+ break;
+ case 2:
+ *(unsigned short *)pbuf = (unsigned short)data;
+ break;
+ case 4:
+ *(unsigned int *)pbuf = (unsigned int)data;
+ break;
+ case 8:
+ *(unsigned long *)pbuf = (unsigned long)data;
+ break;
+ default:
+ kvm_err("%s: unknown data len: %d\n", __func__, len);
+ }
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+}
+
+static int mail_send(struct kvm *kvm, uint64_t data)
+{
+ int i, cpu, mailbox, offset;
+ uint32_t val = 0, mask = 0;
+ struct kvm_vcpu *vcpu;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+ mailbox = ((data & 0xffffffff) >> 2) & 0x7;
+ offset = IOCSR_IPI_BUF_20 + mailbox * 4;
+ if ((data >> 27) & 0xf) {
+ val = read_mailbox(vcpu, offset, 4);
+ for (i = 0; i < 4; i++)
+ if (data & (BIT(27 + i)))
+ mask |= (0xff << (i * 8));
+ val &= mask;
+ }
+
+ val |= ((uint32_t)(data >> 32) & ~mask);
+ write_mailbox(vcpu, offset, val, 4);
+
+ return 0;
+}
+
+static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
+{
+ int i, idx, ret;
+ uint64_t val = 0, mask = 0;
+
+ /*
+ * Bits 27-30 select which bytes of the old value are kept.
+ * If none is set, there is nothing to read back and merge.
+ */
+ if ((data >> 27) & 0xf) {
+ /* Read the old val */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (unlikely(ret)) {
+ kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
+ return ret;
+ }
+ /* Construct the mask by scanning bits 27-30 */
+ for (i = 0; i < 4; i++) {
+ if (data & (BIT(27 + i)))
+ mask |= (0xff << (i * 8));
+ }
+ /* Keep the bytes flagged as preserved in the old val */
+ val &= mask;
+ }
+ val |= ((uint32_t)(data >> 32) & ~mask);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (unlikely(ret))
+ kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
+
+ return ret;
+}
+
+static int any_send(struct kvm *kvm, uint64_t data)
+{
+ int cpu, offset;
+ struct kvm_vcpu *vcpu;
+
+ cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
+ vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+ offset = data & 0xffff;
+
+ return send_ipi_data(vcpu, offset, data);
+}
+
+static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
+{
+ int ret = 0;
+ uint32_t offset;
+ uint64_t res = 0;
+
+ offset = (uint32_t)(addr & 0x1ff);
+ WARN_ON_ONCE(offset & (len - 1));
+
+ switch (offset) {
+ case IOCSR_IPI_STATUS:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ res = vcpu->arch.ipi_state.status;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_EN:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ res = vcpu->arch.ipi_state.en;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_SET:
+ res = 0;
+ break;
+ case IOCSR_IPI_CLEAR:
+ res = 0;
+ break;
+ case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
+ if (offset + len > IOCSR_IPI_BUF_38 + 8) {
+ kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
+ __func__, offset, len);
+ ret = -EINVAL;
+ break;
+ }
+ res = read_mailbox(vcpu, offset, len);
+ break;
+ default:
+ kvm_err("%s: unknown addr: %llx\n", __func__, addr);
+ ret = -EINVAL;
+ break;
+ }
+ *(uint64_t *)val = res;
+
+ return ret;
+}
+
+static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
+{
+ int ret = 0;
+ uint64_t data;
+ uint32_t offset;
+
+ data = *(uint64_t *)val;
+
+ offset = (uint32_t)(addr & 0x1ff);
+ WARN_ON_ONCE(offset & (len - 1));
+
+ switch (offset) {
+ case IOCSR_IPI_STATUS:
+ ret = -EINVAL;
+ break;
+ case IOCSR_IPI_EN:
+ spin_lock(&vcpu->arch.ipi_state.lock);
+ vcpu->arch.ipi_state.en = data;
+ spin_unlock(&vcpu->arch.ipi_state.lock);
+ break;
+ case IOCSR_IPI_SET:
+ ipi_set(vcpu, data);
+ break;
+ case IOCSR_IPI_CLEAR:
+ /* Just clear the status of the current vcpu */
+ ipi_clear(vcpu, data);
+ break;
+ case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
+ if (offset + len > IOCSR_IPI_BUF_38 + 8) {
+ kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
+ __func__, offset, len);
+ ret = -EINVAL;
+ break;
+ }
+ write_mailbox(vcpu, offset, data, len);
+ break;
+ case IOCSR_IPI_SEND:
+ ipi_send(vcpu->kvm, data);
+ break;
+ case IOCSR_MAIL_SEND:
+ ret = mail_send(vcpu->kvm, data);
+ break;
+ case IOCSR_ANY_SEND:
+ ret = any_send(vcpu->kvm, data);
+ break;
+ default:
+ kvm_err("%s: unknown addr: %llx\n", __func__, addr);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ vcpu->stat.ipi_read_exits++;
+ return loongarch_ipi_readl(vcpu, addr, len, val);
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ vcpu->stat.ipi_write_exits++;
+ return loongarch_ipi_writel(vcpu, addr, len, val);
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+static int kvm_ipi_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ int len = 4;
+ int cpu, addr;
+ uint64_t val;
+ void *p = NULL;
+ struct kvm_vcpu *vcpu;
+
+ cpu = (attr->attr >> 16) & 0x3ff;
+ addr = attr->attr & 0xff;
+
+ vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ switch (addr) {
+ case IOCSR_IPI_STATUS:
+ p = &vcpu->arch.ipi_state.status;
+ break;
+ case IOCSR_IPI_EN:
+ p = &vcpu->arch.ipi_state.en;
+ break;
+ case IOCSR_IPI_SET:
+ p = &vcpu->arch.ipi_state.set;
+ break;
+ case IOCSR_IPI_CLEAR:
+ p = &vcpu->arch.ipi_state.clear;
+ break;
+ case IOCSR_IPI_BUF_20:
+ p = &vcpu->arch.ipi_state.buf[0];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_28:
+ p = &vcpu->arch.ipi_state.buf[1];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_30:
+ p = &vcpu->arch.ipi_state.buf[2];
+ len = 8;
+ break;
+ case IOCSR_IPI_BUF_38:
+ p = &vcpu->arch.ipi_state.buf[3];
+ len = 8;
+ break;
+ default:
+ kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
+ return -EINVAL;
+ }
+
+ if (is_write) {
+ if (len == 4) {
+ if (get_user(val, (uint32_t __user *)attr->addr))
+ return -EFAULT;
+ *(uint32_t *)p = (uint32_t)val;
+ } else if (len == 8) {
+ if (get_user(val, (uint64_t __user *)attr->addr))
+ return -EFAULT;
+ *(uint64_t *)p = val;
+ }
+ } else {
+ if (len == 4) {
+ val = *(uint32_t *)p;
+ return put_user(val, (uint32_t __user *)attr->addr);
+ } else if (len == 8) {
+ val = *(uint64_t *)p;
+ return put_user(val, (uint64_t __user *)attr->addr);
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
+ return kvm_ipi_regs_access(dev, attr, false);
+ default:
+ kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
+ return -EINVAL;
+ }
+}
+
+static int kvm_ipi_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
+ return kvm_ipi_regs_access(dev, attr, true);
+ default:
+ kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
+ return -EINVAL;
+ }
+}
+
+static int kvm_ipi_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ struct kvm *kvm;
+ struct kvm_io_device *device;
+ struct loongarch_ipi *s;
+
+ if (!dev) {
+ kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
+ return -EINVAL;
+ }
+
+ kvm = dev->kvm;
+ if (kvm->arch.ipi) {
+ kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
+ return -EINVAL;
+ }
+
+ s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+
+ /*
+ * Initialize IOCSR device
+ */
+ device = &s->device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret < 0) {
+ kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
+ goto err;
+ }
+
+ kvm->arch.ipi = s;
+ return 0;
+
+err:
+ kfree(s);
+ return -EFAULT;
+}
+
+static void kvm_ipi_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_ipi *ipi;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
+ return;
+
+ kvm = dev->kvm;
+ ipi = kvm->arch.ipi;
+ kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
+ kfree(ipi);
+}
+
+static struct kvm_device_ops kvm_ipi_dev_ops = {
+ .name = "kvm-loongarch-ipi",
+ .create = kvm_ipi_create,
+ .destroy = kvm_ipi_destroy,
+ .set_attr = kvm_ipi_set_attr,
+ .get_attr = kvm_ipi_get_attr,
+};
+
+int kvm_loongarch_register_ipi_device(void)
+{
+ return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
+}
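
mail_send() above decodes a packed 64-bit command: the target cpuid sits in
bits 16-25, the 32-bit mailbox slot in bits 2-4, a per-byte preserve mask in
bits 27-30, and the new payload in bits 32-63. An illustrative packing helper
(an assumption for clarity, not kernel UAPI):

    #include <stdint.h>

    /* A set bit in keep_mask preserves that byte of the old mailbox word;
     * cleared bits are overwritten by the corresponding payload byte. */
    static inline uint64_t pack_mail_send(uint32_t cpuid, uint32_t slot,
                                          uint32_t keep_mask, uint32_t payload)
    {
            return ((uint64_t)payload << 32) |            /* bits 32-63 */
                   ((uint64_t)(keep_mask & 0xf) << 27) |  /* bits 27-30 */
                   ((uint64_t)(cpuid & 0x3ff) << 16) |    /* bits 16-25 */
                   ((slot & 0x7) << 2);                   /* bits 2-4   */
    }
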
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
new file mode 100644
index 000000000000..a698a73de399
--- /dev/null
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/count_zeros.h>
+
+/* update the isr according to irq level and route irq to eiointc */
+static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level)
+{
+ u64 mask = BIT(irq);
+
+ /*
+ * Set isr and route the irq to the eiointc;
+ * the route table is htmsi_vector[].
+ */
+ if (level) {
+ if (mask & s->irr & ~s->mask) {
+ s->isr |= mask;
+ irq = s->htmsi_vector[irq];
+ eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
+ }
+ } else {
+ if (mask & s->isr & ~s->irr) {
+ s->isr &= ~mask;
+ irq = s->htmsi_vector[irq];
+ eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
+ }
+ }
+}
+
+/* update batch irqs, the irq_mask is a bitmap of irqs */
+static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level)
+{
+ unsigned int irq;
+ DECLARE_BITMAP(irqs, 64) = { BITMAP_FROM_U64(irq_mask) };
+
+ for_each_set_bit(irq, irqs, 64)
+ pch_pic_update_irq(s, irq, level);
+}
+
+/* called when an irq is triggered in the pch pic */
+void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level)
+{
+ u64 mask = BIT(irq);
+
+ spin_lock(&s->lock);
+ if (level)
+ s->irr |= mask; /* set irr */
+ else {
+ /*
+ * In edge-triggered mode, level 0 does not clear the irq;
+ * the irr bit is cleared only when the cpu writes to the
+ * PCH_PIC_CLEAR_START address area
+ */
+ if (s->edge & mask) {
+ spin_unlock(&s->lock);
+ return;
+ }
+ s->irr &= ~mask;
+ }
+ pch_pic_update_irq(s, irq, level);
+ spin_unlock(&s->lock);
+}
+
+/* msi irq handler */
+void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
+{
+ eiointc_set_irq(kvm->arch.eiointc, irq, level);
+}
+
+static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
+{
+ int ret = 0, offset;
+ u64 data = 0;
+ void *ptemp;
+
+ offset = addr - s->pch_pic_base;
+ offset -= offset & 7;
+
+ spin_lock(&s->lock);
+ switch (offset) {
+ case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END:
+ data = s->id.data;
+ break;
+ case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
+ data = s->mask;
+ break;
+ case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
+ /* read htmsi enable reg */
+ data = s->htmsi_en;
+ break;
+ case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
+ /* read edge enable reg */
+ data = s->edge;
+ break;
+ case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
+ case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
+ /* we only use default mode: fixed interrupt distribution mode */
+ break;
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ /* only route to int0: eiointc */
+ ptemp = s->route_entry + (offset - PCH_PIC_ROUTE_ENTRY_START);
+ data = *(u64 *)ptemp;
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ /* read htmsi vector */
+ ptemp = s->htmsi_vector + (offset - PCH_PIC_HTMSI_VEC_START);
+ data = *(u64 *)ptemp;
+ break;
+ case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
+ data = s->polarity;
+ break;
+ case PCH_PIC_INT_IRR_START:
+ data = s->irr;
+ break;
+ case PCH_PIC_INT_ISR_START:
+ data = s->isr;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock(&s->lock);
+
+ if (ret == 0) {
+ offset = (addr - s->pch_pic_base) & 7;
+ data = data >> (offset * 8);
+ memcpy(val, &data, len);
+ }
+
+ return ret;
+}
+
+static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ int ret;
+ struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
+
+ if (!s) {
+ kvm_err("%s: pch pic irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr & (len - 1)) {
+ kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ /* statistics of pch pic reading */
+ vcpu->stat.pch_pic_read_exits++;
+ ret = loongarch_pch_pic_read(s, addr, len, val);
+
+ return ret;
+}
+
+static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
+ int len, const void *val)
+{
+ int ret = 0, offset;
+ u64 old, data, mask;
+ void *ptemp;
+
+ switch (len) {
+ case 1:
+ data = *(u8 *)val;
+ mask = 0xFF;
+ break;
+ case 2:
+ data = *(u16 *)val;
+ mask = USHRT_MAX;
+ break;
+ case 4:
+ data = *(u32 *)val;
+ mask = UINT_MAX;
+ break;
+ case 8:
+ default:
+ data = *(u64 *)val;
+ mask = ULONG_MAX;
+ break;
+ }
+
+ offset = (addr - s->pch_pic_base) & 7;
+ mask = mask << (offset * 8);
+ data = data << (offset * 8);
+ offset = (addr - s->pch_pic_base) - offset;
+
+ spin_lock(&s->lock);
+ switch (offset) {
+ case PCH_PIC_MASK_START:
+ old = s->mask;
+ s->mask = (old & ~mask) | data;
+ if (old & ~data)
+ pch_pic_update_batch_irqs(s, old & ~data, 1);
+ if (~old & data)
+ pch_pic_update_batch_irqs(s, ~old & data, 0);
+ break;
+ case PCH_PIC_HTMSI_EN_START:
+ s->htmsi_en = (s->htmsi_en & ~mask) | data;
+ break;
+ case PCH_PIC_EDGE_START:
+ s->edge = (s->edge & ~mask) | data;
+ break;
+ case PCH_PIC_POLARITY_START:
+ s->polarity = (s->polarity & ~mask) | data;
+ break;
+ case PCH_PIC_CLEAR_START:
+ old = s->irr & s->edge & data;
+ if (old) {
+ s->irr &= ~old;
+ pch_pic_update_batch_irqs(s, old, 0);
+ }
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ ptemp = s->htmsi_vector + (offset - PCH_PIC_HTMSI_VEC_START);
+ *(u64 *)ptemp = (*(u64 *)ptemp & ~mask) | data;
+ break;
+ /* Not implemented */
+ case PCH_PIC_AUTO_CTRL0_START:
+ case PCH_PIC_AUTO_CTRL1_START:
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock(&s->lock);
+
+ return ret;
+}
+
+static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ int ret;
+ struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
+
+ if (!s) {
+ kvm_err("%s: pch pic irqchip not valid!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr & (len - 1)) {
+ kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ /* statistics of pch pic writing */
+ vcpu->stat.pch_pic_write_exits++;
+ ret = loongarch_pch_pic_write(s, addr, len, val);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops kvm_pch_pic_ops = {
+ .read = kvm_pch_pic_read,
+ .write = kvm_pch_pic_write,
+};
+
+static int kvm_pch_pic_init(struct kvm_device *dev, u64 addr)
+{
+ int ret;
+ struct kvm *kvm = dev->kvm;
+ struct kvm_io_device *device;
+ struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic;
+
+ s->pch_pic_base = addr;
+ device = &s->device;
+ /* init device by pch pic writing and reading ops */
+ kvm_iodevice_init(device, &kvm_pch_pic_ops);
+ mutex_lock(&kvm->slots_lock);
+ /* register pch pic device */
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device);
+ mutex_unlock(&kvm->slots_lock);
+
+ return (ret < 0) ? -EFAULT : 0;
+}
+
+/* used by user space to get or set pch pic registers */
+static int kvm_pch_pic_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ bool is_write)
+{
+ char buf[8];
+ int addr, offset, len = 8, ret = 0;
+ void __user *data;
+ void *p = NULL;
+ struct loongarch_pch_pic *s;
+
+ s = dev->kvm->arch.pch_pic;
+ addr = attr->attr;
+ data = (void __user *)attr->addr;
+
+ /* get pointer to pch pic register by addr */
+ switch (addr) {
+ case PCH_PIC_MASK_START:
+ p = &s->mask;
+ break;
+ case PCH_PIC_HTMSI_EN_START:
+ p = &s->htmsi_en;
+ break;
+ case PCH_PIC_EDGE_START:
+ p = &s->edge;
+ break;
+ case PCH_PIC_AUTO_CTRL0_START:
+ p = &s->auto_ctrl0;
+ break;
+ case PCH_PIC_AUTO_CTRL1_START:
+ p = &s->auto_ctrl1;
+ break;
+ case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
+ offset = addr - PCH_PIC_ROUTE_ENTRY_START;
+ p = &s->route_entry[offset];
+ len = 1;
+ break;
+ case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
+ offset = addr - PCH_PIC_HTMSI_VEC_START;
+ p = &s->htmsi_vector[offset];
+ len = 1;
+ break;
+ case PCH_PIC_INT_IRR_START:
+ p = &s->irr;
+ break;
+ case PCH_PIC_INT_ISR_START:
+ p = &s->isr;
+ break;
+ case PCH_PIC_POLARITY_START:
+ p = &s->polarity;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (is_write) {
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+ }
+
+ spin_lock(&s->lock);
+ if (is_write)
+ memcpy(p, buf, len);
+ else
+ memcpy(buf, p, len);
+ spin_unlock(&s->lock);
+
+ if (!is_write) {
+ if (copy_to_user(data, buf, len))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int kvm_pch_pic_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
+ return kvm_pch_pic_regs_access(dev, attr, false);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_pch_pic_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ u64 addr;
+ void __user *uaddr = (void __user *)(long)attr->addr;
+
+ switch (attr->group) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT:
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ if (!dev->kvm->arch.pch_pic) {
+ kvm_err("%s: please create pch_pic irqchip first!\n", __func__);
+ return -ENODEV;
+ }
+
+ return kvm_pch_pic_init(dev, addr);
+ default:
+ kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group,
+ attr->attr);
+ return -EINVAL;
+ }
+ case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
+ return kvm_pch_pic_regs_access(dev, attr, true);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvm_setup_default_irq_routing(struct kvm *kvm)
+{
+ int i, ret;
+ u32 nr = KVM_IRQCHIP_NUM_PINS;
+ struct kvm_irq_routing_entry *entries;
+
+ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < nr; i++) {
+ entries[i].gsi = i;
+ entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries[i].u.irqchip.irqchip = 0;
+ entries[i].u.irqchip.pin = i;
+ }
+ ret = kvm_set_irq_routing(kvm, entries, nr, 0);
+ kfree(entries);
+
+ return ret;
+}
+
+static int kvm_pch_pic_create(struct kvm_device *dev, u32 type)
+{
+ int i, ret, irq_num;
+ struct kvm *kvm = dev->kvm;
+ struct loongarch_pch_pic *s;
+
+ /* the pch pic must not have been created already */
+ if (kvm->arch.pch_pic)
+ return -EINVAL;
+
+ ret = kvm_setup_default_irq_routing(kvm);
+ if (ret)
+ return -ENOMEM;
+
+ s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ /*
+ * Interrupt controller identification register 1
+ * Bit 24-31 Interrupt Controller ID
+ * Interrupt controller identification register 2
+ * Bit 0-7 Interrupt Controller version number
+ * Bit 16-23 The number of interrupt sources supported
+ */
+ irq_num = 32;
+ s->mask = -1UL;
+ s->id.desc.id = PCH_PIC_INT_ID_VAL;
+ s->id.desc.version = PCH_PIC_INT_ID_VER;
+ s->id.desc.irq_num = irq_num - 1;
+ for (i = 0; i < irq_num; i++) {
+ s->route_entry[i] = 1;
+ s->htmsi_vector[i] = i;
+ }
+ spin_lock_init(&s->lock);
+ s->kvm = kvm;
+ kvm->arch.pch_pic = s;
+
+ return 0;
+}
+
+static void kvm_pch_pic_destroy(struct kvm_device *dev)
+{
+ struct kvm *kvm;
+ struct loongarch_pch_pic *s;
+
+ if (!dev || !dev->kvm || !dev->kvm->arch.pch_pic)
+ return;
+
+ kvm = dev->kvm;
+ s = kvm->arch.pch_pic;
+ /* unregister the pch pic device and free its memory */
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
+ kfree(s);
+}
+
+static struct kvm_device_ops kvm_pch_pic_dev_ops = {
+ .name = "kvm-loongarch-pch-pic",
+ .create = kvm_pch_pic_create,
+ .destroy = kvm_pch_pic_destroy,
+ .set_attr = kvm_pch_pic_set_attr,
+ .get_attr = kvm_pch_pic_get_attr,
+};
+
+int kvm_loongarch_register_pch_pic_device(void)
+{
+ return kvm_register_device_ops(&kvm_pch_pic_dev_ops, KVM_DEV_TYPE_LOONGARCH_PCHPIC);
+}
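
The attr handlers above imply a two-step userspace bring-up: create the
device, then pass the MMIO base through the CTRL_INIT attribute (read as a
u64 from attr->addr). A hedged sketch with error handling elided:

    #include <stdint.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int create_pch_pic(int vm_fd, uint64_t mmio_base)
    {
            struct kvm_create_device cd = {
                    .type = KVM_DEV_TYPE_LOONGARCH_PCHPIC,
            };
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL,
                    .attr  = KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT,
                    .addr  = (unsigned long)&mmio_base, /* u64 copied in */
            };

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                    return -1;
            ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);

            return cd.fd;
    }
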
diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
index 4c3f22de4b40..a6d42d399a59 100644
--- a/arch/loongarch/kvm/interrupt.c
+++ b/arch/loongarch/kvm/interrupt.c
@@ -21,6 +21,7 @@ static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
[INT_HWI5] = CPU_IP5,
[INT_HWI6] = CPU_IP6,
[INT_HWI7] = CPU_IP7,
+ [INT_AVEC] = CPU_AVEC,
};
static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
@@ -31,6 +32,11 @@ static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
+ if (cpu_has_msgint && (priority == INT_AVEC)) {
+ set_gcsr_estat(irq);
+ return 1;
+ }
+
switch (priority) {
case INT_TI:
case INT_IPI:
@@ -58,6 +64,11 @@ static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
+ if (cpu_has_msgint && (priority == INT_AVEC)) {
+ clear_gcsr_estat(irq);
+ return 1;
+ }
+
switch (priority) {
case INT_TI:
case INT_IPI:
@@ -83,28 +94,11 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.irq_pending;
unsigned long *pending_clr = &vcpu->arch.irq_clear;
- if (!(*pending) && !(*pending_clr))
- return;
-
- if (*pending_clr) {
- priority = __ffs(*pending_clr);
- while (priority <= INT_IPI) {
- kvm_irq_clear(vcpu, priority);
- priority = find_next_bit(pending_clr,
- BITS_PER_BYTE * sizeof(*pending_clr),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending_clr, EXCCODE_INT_NUM)
+ kvm_irq_clear(vcpu, priority);
- if (*pending) {
- priority = __ffs(*pending);
- while (priority <= INT_IPI) {
- kvm_irq_deliver(vcpu, priority);
- priority = find_next_bit(pending,
- BITS_PER_BYTE * sizeof(*pending),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending, EXCCODE_INT_NUM)
+ kvm_irq_deliver(vcpu, priority);
}
int kvm_pending_timer(struct kvm_vcpu *vcpu)
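
The rewritten loops in kvm_deliver_intr() rely on for_each_set_bit() visiting
set bits in ascending order across the whole EXCCODE_INT_NUM range; the old
open-coded loops stopped at INT_IPI and would have skipped the new INT_AVEC
priority. A minimal sketch of the pattern, assuming the kernel bitmap
helpers:

    unsigned long pending = BIT(INT_TI) | BIT(INT_IPI) | BIT(INT_AVEC);
    unsigned int priority;

    /* delivers INT_TI, then INT_IPI, then INT_AVEC */
    for_each_set_bit(priority, &pending, EXCCODE_INT_NUM)
            kvm_irq_deliver(vcpu, priority);
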
diff --git a/arch/loongarch/kvm/irqfd.c b/arch/loongarch/kvm/irqfd.c
new file mode 100644
index 000000000000..9a39627aecf0
--- /dev/null
+++ b/arch/loongarch/kvm/irqfd.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+#include <asm/kvm_pch_pic.h>
+
+static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ /* PCH-PIC pin (0 ~ 63) <---> GSI (0 ~ 63) */
+ pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
+
+ return 0;
+}
+
+/*
+ * kvm_set_msi: inject the MSI corresponding to the
+ * MSI routing entry
+ *
+ * This is the entry point for irqfd MSI injection
+ * and userspace MSI injection.
+ */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ if (!level)
+ return -1;
+
+ pch_msi_set_irq(kvm, e->msi.data, level);
+
+ return 0;
+}
+
+/*
+ * kvm_set_routing_entry: populate a kvm routing entry
+ * from a user routing entry
+ *
+ * @kvm: the VM this entry is applied to
+ * @e: kvm kernel routing entry handle
+ * @ue: user api routing entry handle
+ * return 0 on success, -EINVAL on errors.
+ */
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ e->set = kvm_set_pic_irq;
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ e->irqchip.pin = ue->u.irqchip.pin;
+
+ if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
+ return -EINVAL;
+
+ return 0;
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
+ return 0;
+ case KVM_IRQ_ROUTING_MSI:
+ pch_msi_set_irq(kvm, e->msi.data, level);
+ return 0;
+ default:
+ return -EWOULDBLOCK;
+ }
+}
+
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return kvm_arch_irqchip_in_kernel(kvm);
+}
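
With the routing entries above in place, userspace can bind an eventfd to a
GSI so that signalling the fd raises the corresponding PCH-PIC pin without a
round trip through the VMM. A hedged sketch using the generic KVM_IRQFD
ioctl:

    #include <linux/kvm.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    static int bind_irqfd(int vm_fd, unsigned int gsi)
    {
            int efd = eventfd(0, EFD_NONBLOCK);
            struct kvm_irqfd irqfd = { .fd = efd, .gsi = gsi };

            if (efd < 0 || ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
                    return -1;

            return efd; /* a write to efd now injects the interrupt */
    }
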
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 86a2f2d0cb27..80ea63d465b8 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -9,6 +9,8 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
#include "trace.h"
unsigned long vpid_mask;
@@ -242,6 +244,25 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
+ kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+
+ /*
+ * LLBCTL is a guest CSR register separate from the host one. An
+ * ERET from a general exception clears the host LLBCTL register
+ * in host mode and the guest LLBCTL register in guest mode; an
+ * ERET from a tlb refill exception clears neither.
+ *
+ * The guest OS cannot notice when a secondary mmu mapping is
+ * changed, even though memory contents may change with it.
+ *
+ * So clear WCLLB of the guest LLBCTL register here whenever the
+ * mapping changes. Otherwise, if the mapping changes while the
+ * guest is inside an LL/SC pair, the LL has loaded from the old
+ * address and set the LLBCTL flag, and the SC would still store
+ * to the new address successfully because LLBCTL_WCLLB is on,
+ * even if memory at the new address was changed on other VCPUs.
+ */
+ set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
}
/* Restore GSTAT(0x50).vpid */
@@ -260,7 +281,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -ENOIOCTLCMD;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
unsigned long env, gcfg = 0;
@@ -275,16 +296,16 @@ int kvm_arch_hardware_enable(void)
/*
* Enable virtualization features granting guest direct control of
* certain features:
- * GCI=2: Trap on init or unimplement cache instruction.
+ * GCI=2: Trap on init or unimplemented cache instruction.
* TORU=0: Trap on Root Unimplemented.
* CACTRL=1: Root control cache.
- * TOP=0: Trap on Previlege.
+ * TOP=0: Trap on Privilege.
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
- if (env & CSR_GCFG_GCIP_ALL)
+ if (env & CSR_GCFG_GCIP_SECURE)
gcfg |= CSR_GCFG_GCI_SECURE;
- if (env & CSR_GCFG_MATC_ROOT)
+ if (env & CSR_GCFG_MATP_ROOT)
gcfg |= CSR_GCFG_MATC_ROOT;
write_csr_gcfg(gcfg);
@@ -296,10 +317,17 @@ int kvm_arch_hardware_enable(void)
kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+ /*
+ * HW Guest CSR registers are lost across CPU suspend and resume.
+ * Clear last_vcpu so that the Guest CSR registers are forced to
+ * reload from the vCPU SW state.
+ */
+ this_cpu_ptr(vmcs)->last_vcpu = NULL;
+
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
write_csr_gcfg(0);
write_csr_gstat(0);
@@ -312,7 +340,7 @@ void kvm_arch_hardware_disable(void)
static int kvm_loongarch_env_init(void)
{
- int cpu, order;
+ int cpu, order, ret;
void *addr;
struct kvm_context *context;
@@ -366,8 +394,22 @@ static int kvm_loongarch_env_init(void)
}
kvm_init_gcsr_flag();
+ kvm_register_perf_callbacks(NULL);
- return 0;
+ /* Register LoongArch IPI interrupt controller interface. */
+ ret = kvm_loongarch_register_ipi_device();
+ if (ret)
+ return ret;
+
+ /* Register LoongArch EIOINTC interrupt controller interface. */
+ ret = kvm_loongarch_register_eiointc_device();
+ if (ret)
+ return ret;
+
+ /* Register LoongArch PCH-PIC interrupt controller interface. */
+ ret = kvm_loongarch_register_pch_pic_device();
+
+ return ret;
}
static void kvm_loongarch_env_exit(void)
@@ -384,6 +426,8 @@ static void kvm_loongarch_env_exit(void)
}
kfree(kvm_loongarch_ops);
}
+
+ kvm_unregister_perf_callbacks();
}
static int kvm_loongarch_init(void)
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 98883aa23ab8..a7fa458e3360 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
child = kvm_mmu_memory_cache_alloc(cache);
_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
+ smp_wmb(); /* Make pte visible before pmd */
kvm_set_pte(entry, __pa(child));
} else if (kvm_pte_huge(*entry)) {
return entry;
@@ -444,6 +445,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
int needs_flush;
+ u32 old_flags = old ? old->flags : 0;
+ u32 new_flags = new ? new->flags : 0;
+ bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+
+ /* Only track memslot flags changed */
+ if (change != KVM_MR_FLAGS_ONLY)
+ return;
+
+ /* Discard dirty page tracking on readonly memslot */
+ if ((old_flags & new_flags) & KVM_MEM_READONLY)
+ return;
/*
* If dirty page logging is enabled, write protect all pages in the slot
@@ -454,9 +466,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
*/
- if (change == KVM_MR_FLAGS_ONLY &&
- (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+ if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) {
+ /*
+ * Initially-all-set does not require write protecting any page
+ * because they're all assumed to be dirty.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
+
spin_lock(&kvm->mmu_lock);
/* Write protect GPA page table entries */
needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
@@ -535,7 +552,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
int ret = 0;
- kvm_pfn_t pfn = 0;
kvm_pte_t *ptep, changed, new;
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
@@ -551,13 +567,9 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
/* Track access to pages marked old */
- new = *ptep;
- if (!kvm_pte_young(new))
- new = kvm_pte_mkyoung(new);
- /* call kvm_set_pfn_accessed() after unlock */
-
+ new = kvm_pte_mkyoung(*ptep);
if (write && !kvm_pte_dirty(new)) {
- if (!kvm_pte_write(new)) {
+ if (!kvm_pte_writeable(new)) {
ret = -EFAULT;
goto out;
}
@@ -579,23 +591,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
changed = new ^ (*ptep);
- if (changed) {
+ if (changed)
kvm_set_pte(ptep, new);
- pfn = kvm_pte_pfn(new);
- }
- spin_unlock(&kvm->mmu_lock);
- /*
- * Fixme: pfn may be freed after mmu_lock
- * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
- */
- if (kvm_pte_young(changed))
- kvm_set_pfn_accessed(pfn);
+ spin_unlock(&kvm->mmu_lock);
- if (kvm_pte_dirty(changed)) {
+ if (kvm_pte_dirty(changed))
mark_page_dirty(kvm, gfn);
- kvm_set_pfn_dirty(pfn);
- }
+
return ret;
out:
spin_unlock(&kvm->mmu_lock);
@@ -695,19 +698,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
- pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+ pgd = pgdp_get(pgd_offset(kvm->mm, hva));
if (pgd_none(pgd))
goto out;
- p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+ p4d = p4dp_get(p4d_offset(&pgd, hva));
if (p4d_none(p4d) || !p4d_present(p4d))
goto out;
- pud = READ_ONCE(*pud_offset(&p4d, hva));
+ pud = pudp_get(pud_offset(&p4d, hva));
if (pud_none(pud) || !pud_present(pud))
goto out;
- pmd = READ_ONCE(*pmd_offset(&pud, hva));
+ pmd = pmdp_get(pmd_offset(&pud, hva));
if (pmd_none(pmd) || !pmd_present(pmd))
goto out;
@@ -737,6 +740,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g
val += PAGE_SIZE;
}
+ smp_wmb(); /* Make pte visible before pmd */
/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
kvm_set_pte(ptep, __pa(child));
@@ -776,6 +780,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
struct kvm *kvm = vcpu->kvm;
struct kvm_memory_slot *memslot;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct page *page;
/* Try the fast path to handle old / clean pages */
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -803,7 +808,7 @@ retry:
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
@@ -815,7 +820,7 @@ retry:
smp_rmb();
/* Slow path - ask KVM core whether we can access this GPA */
- pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
+ pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page);
if (is_error_noslot_pfn(pfn)) {
err = -EFAULT;
goto out;
@@ -827,10 +832,10 @@ retry:
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
- * gfn_to_pfn_prot().
+ * kvm_faultin_pfn().
*/
spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(pfn);
+ kvm_release_page_unused(page);
if (retry_no > 100) {
retry_no = 0;
schedule();
@@ -851,18 +856,28 @@ retry:
prot_bits |= _CACHE_SUC;
if (writeable) {
- prot_bits |= _PAGE_WRITE;
- if (write)
- prot_bits |= __WRITEABLE;
+ prot_bits = kvm_pte_mkwriteable(prot_bits);
+ if (write || !kvm_slot_dirty_track_enabled(memslot))
+ prot_bits = kvm_pte_mkdirty(prot_bits);
}
/* Disable dirty logging on HugePages */
level = 0;
- if (!fault_supports_huge_mapping(memslot, hva, write)) {
- level = 0;
- } else {
+ if (fault_supports_huge_mapping(memslot, hva, write)) {
+ /* Check the page level in the host mmu */
level = host_pfn_mapping_level(kvm, gfn, memslot);
if (level == 1) {
+ /*
+ * Check the page level in the secondary mmu;
+ * disable hugepage mapping if the page is
+ * already mapped there as a normal page
+ */
+ ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
+ if (ptep && !kvm_pte_huge(*ptep))
+ level = 0;
+ }
+
+ if (level == 1) {
gfn = gfn & ~(PTRS_PER_PTE - 1);
pfn = pfn & ~(PTRS_PER_PTE - 1);
}
@@ -885,21 +900,19 @@ retry:
else
++kvm->stat.pages;
kvm_set_pte(ptep, new_pte);
+
+ kvm_release_faultin_page(kvm, page, false, writeable);
spin_unlock(&kvm->mmu_lock);
- if (prot_bits & _PAGE_DIRTY) {
+ if (kvm_pte_dirty(prot_bits))
mark_page_dirty_in_slot(kvm, memslot, gfn);
- kvm_set_pfn_dirty(pfn);
- }
- kvm_set_pfn_accessed(pfn);
- kvm_release_pfn_clean(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
}
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode)
{
int ret;
@@ -908,7 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
- kvm_flush_tlb_gpa(vcpu, gpa);
+ if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) {
+ /*
+ * With HW PTW, no invalid TLB entry is installed on a page
+ * fault. But for the EXCCODE_TLBM exception, a stale TLB entry
+ * may remain from the last read access.
+ *
+ * With SW PTW, an invalid TLB entry is installed in the TLB
+ * refill exception, so it must always be flushed.
+ */
+ vcpu->arch.flush_gpa = gpa;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ }
return 0;
}
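
The initially-all-set early return in kvm_arch_commit_memory_region() above
pairs with KVM's manual dirty-log protect mode, where pages start out dirty
and are write protected lazily through KVM_CLEAR_DIRTY_LOG rather than at
memslot update time. A hedged sketch of how userspace opts in, assuming the
capability is available:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int enable_dirty_log_initially_set(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
                    .args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                               KVM_DIRTY_LOG_INITIALLY_SET,
            };

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }
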
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 80e988985a6a..f1768b7a6194 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -60,16 +60,8 @@
ld.d t0, a2, KVM_ARCH_GPC
csrwr t0, LOONGARCH_CSR_ERA
- /* Save host PGDL */
- csrrd t0, LOONGARCH_CSR_PGDL
- st.d t0, a2, KVM_ARCH_HPGD
-
- /* Switch to kvm */
- ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
-
- /* Load guest PGDL */
- li.w t0, KVM_GPGD
- ldx.d t0, t1, t0
+ /* Load PGD for KVM hypervisor */
+ ld.d t0, a2, KVM_ARCH_KVMPGD
csrwr t0, LOONGARCH_CSR_PGDL
/* Mix GID and RID */
@@ -85,7 +77,7 @@
* Guest CRMD comes from separate GCSR_CRMD register
*/
ori t0, zero, CSR_PRMD_PIE
- csrxchg t0, t0, LOONGARCH_CSR_PRMD
+ csrwr t0, LOONGARCH_CSR_PRMD
/* Set PVM bit to setup ertn to guest context */
ori t0, zero, CSR_GSTAT_PVM
@@ -277,6 +269,10 @@ SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index bcc6b6d063d9..29c2aaba63c3 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
+#include <asm/delay.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
@@ -95,6 +96,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
* and set CSR TVAL with -1
*/
write_gcsr_timertick(0);
+ __delay(2); /* Wait a few cycles until the timer interrupt is injected */
/*
* Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
@@ -161,10 +163,11 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
if (kvm_vcpu_is_blocking(vcpu)) {
/*
- * HRTIMER_MODE_PINNED is suggested since vcpu may run in
- * the same physical cpu in next time
+		 * HRTIMER_MODE_PINNED_HARD is suggested since the vcpu is likely
+		 * to run on the same physical cpu next time, and the timer must
+		 * run in hardirq context even in the PREEMPT_RT case.
*/
- hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
}
}
@@ -188,10 +191,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
preempt_enable();
}
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
- write_gcsr_timercfg(0);
- kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
- hrtimer_cancel(&vcpu->arch.swtimer);
-}
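
The switch to HRTIMER_MODE_ABS_PINNED_HARD pairs with the hrtimer_setup() conversion in vcpu.c further down. A minimal sketch of the pattern, assuming a hypothetical wakeup callback:

#include <linux/hrtimer.h>

/* Hypothetical callback; _HARD timers fire in hardirq context even on
 * PREEMPT_RT, so this must be hardirq-safe. */
static enum hrtimer_restart my_wakeup(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct hrtimer *timer)
{
	hrtimer_setup(timer, my_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_HARD);
}

Arming then uses the matching mode, as _kvm_save_timer() does above with hrtimer_start().
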
diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c
index 02535df6b51f..ebdbe9264e9c 100644
--- a/arch/loongarch/kvm/tlb.c
+++ b/arch/loongarch/kvm/tlb.c
@@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
- unsigned long flags;
-
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
gpa &= (PAGE_MASK << 1);
invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
- local_irq_restore(flags);
}
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index 1783397b1bc8..3467ee22b704 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -46,11 +46,15 @@ DEFINE_EVENT(kvm_transition, kvm_out,
/* Further exit reasons */
#define KVM_TRACE_EXIT_IDLE 64
#define KVM_TRACE_EXIT_CACHE 65
+#define KVM_TRACE_EXIT_CPUCFG 66
+#define KVM_TRACE_EXIT_CSR 67
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
{ KVM_TRACE_EXIT_IDLE, "IDLE" }, \
- { KVM_TRACE_EXIT_CACHE, "CACHE" }
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_CPUCFG, "CPUCFG" }, \
+ { KVM_TRACE_EXIT_CSR, "CSR" }
DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -82,6 +86,14 @@ DEFINE_EVENT(kvm_exit, kvm_exit_cache,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
+DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+DEFINE_EVENT(kvm_exit, kvm_exit_csr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
DEFINE_EVENT(kvm_exit, kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
@@ -149,6 +161,41 @@ TRACE_EVENT(kvm_aux,
__entry->pc)
);
+#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0
+#define KVM_TRACE_IOCSR_READ 1
+#define KVM_TRACE_IOCSR_WRITE 2
+
+#define kvm_trace_symbol_iocsr \
+ { KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \
+ { KVM_TRACE_IOCSR_READ, "read" }, \
+ { KVM_TRACE_IOCSR_WRITE, "write" }
+
+TRACE_EVENT(kvm_iocsr,
+ TP_PROTO(int type, int len, u64 gpa, void *val),
+ TP_ARGS(type, len, gpa, val),
+
+ TP_STRUCT__entry(
+ __field( u32, type )
+ __field( u32, len )
+ __field( u64, gpa )
+ __field( u64, val )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->len = len;
+ __entry->gpa = gpa;
+ __entry->val = 0;
+ if (val)
+ memcpy(&__entry->val, val,
+ min_t(u32, sizeof(__entry->val), len));
+ ),
+
+ TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx",
+ __print_symbolic(__entry->type, kvm_trace_symbol_iocsr),
+ __entry->len, __entry->gpa, __entry->val)
+);
+
TRACE_EVENT(kvm_vpid_change,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
TP_ARGS(vcpu, vpid),
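
TRACE_EVENT(kvm_iocsr, ...) above generates a trace_kvm_iocsr() call site for the IOCSR emulation path. A sketch of how it might be invoked (the surrounding function and values are illustrative):

/* Illustrative only: record an 8-byte IOCSR write and an unsatisfied read. */
static void trace_iocsr_demo(u64 gpa)
{
	u64 data = 0x5a5a;

	trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, sizeof(data), gpa, &data);
	trace_kvm_iocsr(KVM_TRACE_IOCSR_READ_UNSATISFIED, 4, gpa, NULL);
}

Passing NULL for val is safe: TP_fast_assign() only copies when val is non-NULL, and clamps the copy to min(sizeof(u64), len).
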
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 9e8030d45129..6d833599ef2e 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -4,8 +4,8 @@
*/
#include <linux/kvm_host.h>
-#include <linux/entry-kvm.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -19,7 +19,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, hypercall_exits)
+ STATS_DESC_COUNTER(VCPU, hypercall_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_read_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_write_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -31,6 +37,169 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
+static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ context->perf_cntr[0] = read_csr_perfcntr0();
+ context->perf_cntr[1] = read_csr_perfcntr1();
+ context->perf_cntr[2] = read_csr_perfcntr2();
+ context->perf_cntr[3] = read_csr_perfcntr3();
+ context->perf_ctrl[0] = write_csr_perfctrl0(0);
+ context->perf_ctrl[1] = write_csr_perfctrl1(0);
+ context->perf_ctrl[2] = write_csr_perfctrl2(0);
+ context->perf_ctrl[3] = write_csr_perfctrl3(0);
+}
+
+static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ write_csr_perfcntr0(context->perf_cntr[0]);
+ write_csr_perfcntr1(context->perf_cntr[1]);
+ write_csr_perfcntr2(context->perf_cntr[2]);
+ write_csr_perfcntr3(context->perf_cntr[3]);
+ write_csr_perfctrl0(context->perf_ctrl[0]);
+ write_csr_perfctrl1(context->perf_ctrl[1]);
+ write_csr_perfctrl2(context->perf_ctrl[2]);
+ write_csr_perfctrl3(context->perf_ctrl[3]);
+}
+
+static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ if (!kvm_guest_has_pmu(&vcpu->arch))
+ return -EINVAL;
+
+ kvm_save_host_pmu(vcpu);
+
+	/* Pass PM0..PM(num) through to the guest */
+ val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+ val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+ write_csr_gcfg(val);
+
+ kvm_restore_guest_pmu(vcpu);
+
+ return 0;
+}
+
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ return;
+
+ kvm_save_guest_pmu(vcpu);
+
+ /* Disable pmu access from guest */
+ write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+	/*
+	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
+	 * exiting the guest, so that the PMU context need not be handled
+	 * on the next trap into the guest.
+	 *
+	 * Otherwise set the request bit KVM_REQ_PMU to restore the guest
+	 * PMU context before entering the guest VM.
+	 */
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+ if (!(val & KVM_PMU_EVENT_ENABLED))
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+ else
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+
+ kvm_restore_host_pmu(vcpu);
+}
+
+static void kvm_check_pmu(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
+ kvm_own_pmu(vcpu);
+ vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
+ }
+}
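
kvm_lose_pmu() above and _kvm_setcsr() below use the same test: OR the four PERFCTRL CSRs together and check KVM_PMU_EVENT_ENABLED. A sketch of that check factored into a helper (the helper name is hypothetical):

/* Hypothetical helper: true if any guest perf counter is enabled. */
static bool guest_pmu_active(struct loongarch_csrs *csr)
{
	unsigned long ctl;

	ctl  = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	ctl |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	ctl |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	ctl |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

	return ctl & KVM_PMU_EVENT_ENABLED;
}

The design keeps the guest PMU context live only while an event is enabled; otherwise the host PMU is restored and KVM_LARCH_PMU is dropped.
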
+
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+ u32 version;
+ u64 steal;
+ gpa_t gpa;
+ struct kvm_memslots *slots;
+ struct kvm_steal_time __user *st;
+ struct gfn_to_hva_cache *ghc;
+
+ ghc = &vcpu->arch.st.cache;
+ gpa = vcpu->arch.st.guest_addr;
+ if (!(gpa & KVM_STEAL_PHYS_VALID))
+ return;
+
+ gpa &= KVM_STEAL_PHYS_MASK;
+ slots = kvm_memslots(vcpu->kvm);
+ if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+ ghc->gpa = INVALID_GPA;
+ return;
+ }
+ }
+
+ st = (struct kvm_steal_time __user *)ghc->hva;
+ unsafe_get_user(version, &st->version, out);
+ if (version & 1)
+		version += 1; /* first-time write; the field may hold random junk */
+
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+ smp_wmb();
+
+ unsafe_get_user(steal, &st->steal, out);
+ steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ unsafe_put_user(steal, &st->steal, out);
+
+ smp_wmb();
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+out:
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
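
The version field works like a seqcount: the host bumps it to an odd value before updating steal and back to an even value afterwards, with smp_wmb() ordering the stores. A guest-side read loop would look roughly like this (a sketch; the struct layout is assumed minimal, the real one has more fields):

struct kvm_steal_time {		/* assumed minimal layout */
	u64 steal;
	u32 version;
};

static u64 read_steal_time(struct kvm_steal_time *st)
{
	u32 v1, v2;
	u64 steal;

	do {
		v1 = READ_ONCE(st->version);
		smp_rmb();
		steal = READ_ONCE(st->steal);
		smp_rmb();
		v2 = READ_ONCE(st->version);
	} while ((v1 & 1) || v1 != v2);	/* retry while an update is in flight */

	return steal;
}
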
/*
* kvm_check_requests - check and handle pending vCPU requests
*
@@ -48,9 +217,22 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
if (kvm_dirty_ring_check_request(vcpu))
return RESUME_HOST;
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ kvm_update_stolen_time(vcpu);
+
return RESUME_GUEST;
}
+static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
+ if (vcpu->arch.flush_gpa != INVALID_GPA) {
+ kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
+ vcpu->arch.flush_gpa = INVALID_GPA;
+ }
+}
+
/*
* Check and handle pending signal and vCPU requests etc
* Run with irq enabled and preempt enabled
@@ -62,16 +244,18 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
*/
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
- int ret;
+ int idx, ret;
/*
* Check conditions before entering the guest
*/
- ret = xfer_to_guest_mode_handle_work(vcpu);
+ ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret < 0)
return ret;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_check_requests(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
@@ -101,11 +285,23 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
+ kvm_check_pmu(vcpu);
+
+	/*
+	 * Called after kvm_check_vpid(), since kvm_check_vpid() updates
+	 * CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may
+	 * also clear the pending KVM_REQ_TLB_FLUSH_GPA request bit.
+	 */
+ kvm_late_check_requests(vcpu);
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
+ kvm_lose_pmu(vcpu);
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@@ -123,7 +319,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret = RESUME_GUEST;
unsigned long estat = vcpu->arch.host_estat;
- u32 intr = estat & 0x1fff; /* Ignore NMI */
+ u32 intr = estat & CSR_ESTAT_IS;
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -131,6 +327,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_lose_pmu(vcpu);
+
guest_timing_exit_irqoff();
guest_state_exit_irqoff();
local_irq_enable();
@@ -171,6 +369,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
+ unsigned long val;
+
+ preempt_disable();
+ val = gcsr_read(LOONGARCH_CSR_CRMD);
+ preempt_enable();
+
+ return (val & CSR_PRMD_PPLV) == PLV_KERN;
+}
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For LoongArch64, if the PMU is not passed through to the VM,
+ * any event that arrives while a vCPU is loaded is considered to be "in guest".
+ */
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
+}
+#endif
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
return false;
}
@@ -354,6 +580,17 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
return -EINVAL;
if (id == LOONGARCH_CSR_ESTAT) {
+ preempt_disable();
+ vcpu_load(vcpu);
+		/*
+		 * Sync pending interrupts into ESTAT so that no interrupt
+		 * is lost during the VM migration stage.
+		 */
+ kvm_deliver_intr(vcpu);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
+ vcpu_put(vcpu);
+ preempt_enable();
+
/* ESTAT IP0~IP7 get from GINTC */
gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
@@ -393,6 +630,22 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
kvm_write_sw_gcsr(csr, id, val);
+	/*
+	 * After modifying a PMU CSR value of the vcpu, set KVM_REQ_PMU
+	 * if any of the PMU CSRs are in use.
+	 */
+ if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
+ unsigned long val;
+
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+
+ if (val & KVM_PMU_EVENT_ENABLED)
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
+
return ret;
}
@@ -406,8 +659,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v = GENMASK(31, 0);
return 0;
case LOONGARCH_CPUCFG1:
- /* CPUCFG1_MSGINT is not supported by KVM */
- *v = GENMASK(25, 0);
+ *v = GENMASK(26, 0);
return 0;
case LOONGARCH_CPUCFG2:
/* CPUCFG2 features unconditionally supported by KVM */
@@ -422,6 +674,14 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v |= CPUCFG2_LSX;
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
+ if (cpu_has_lbt_x86)
+ *v |= CPUCFG2_X86BT;
+ if (cpu_has_lbt_arm)
+ *v |= CPUCFG2_ARMBT;
+ if (cpu_has_lbt_mips)
+ *v |= CPUCFG2_MIPSBT;
+ if (cpu_has_ptw)
+ *v |= CPUCFG2_PTW;
return 0;
case LOONGARCH_CPUCFG3:
@@ -431,6 +691,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
case LOONGARCH_CPUCFG5:
*v = GENMASK(31, 0);
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (cpu_has_pmp)
+ *v = GENMASK(14, 0);
+ else
+ *v = 0;
+ return 0;
case LOONGARCH_CPUCFG16:
*v = GENMASK(16, 0);
return 0;
@@ -461,6 +727,10 @@ static int kvm_check_cpucfg(int id, u64 val)
return -EINVAL;
switch (id) {
+ case LOONGARCH_CPUCFG1:
+ if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
+ return -EINVAL;
+ return 0;
case LOONGARCH_CPUCFG2:
if (!(val & CPUCFG2_LLFTP))
/* Guests must have a constant timer */
@@ -475,6 +745,17 @@ static int kvm_check_cpucfg(int id, u64 val)
/* LASX architecturally implies LSX and FP but val does not satisfy that */
return -EINVAL;
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (val & CPUCFG6_PMP) {
+ u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
+ if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+ return -EINVAL;
+ if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+ return -EINVAL;
+ if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+ return -EINVAL;
+ }
+ return 0;
default:
/*
* Values for the other CPUCFG IDs are not being further validated
@@ -502,6 +783,34 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
else
ret = -EINVAL;
break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ *v = vcpu->arch.lbt.scr0;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ *v = vcpu->arch.lbt.scr1;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ *v = vcpu->arch.lbt.scr2;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ *v = vcpu->arch.lbt.scr3;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ *v = vcpu->arch.lbt.eflags;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ *v = vcpu->arch.fpu.ftop;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
@@ -560,6 +869,37 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
if (ret)
break;
vcpu->arch.cpucfg[id] = (u32)v;
+ if (id == LOONGARCH_CPUCFG6)
+ vcpu->arch.max_pmu_csrid =
+ LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
+ break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ vcpu->arch.lbt.scr0 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ vcpu->arch.lbt.scr1 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ vcpu->arch.lbt.scr2 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ vcpu->arch.lbt.scr3 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ vcpu->arch.lbt.eflags = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ vcpu->arch.fpu.ftop = v;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
@@ -572,9 +912,16 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
- kvm_reset_timer(vcpu);
+ vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+			/*
+			 * On vCPU reset, clear the ESTAT and GINTC registers here;
+			 * the other CSR registers are cleared via _kvm_setcsr().
+			 */
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;
@@ -653,7 +1000,10 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
switch (attr->attr) {
- case 2:
+ case LOONGARCH_CPUCFG2:
+ case LOONGARCH_CPUCFG6:
+ return 0;
+ case CPUCFG_KVM_FEATURE:
return 0;
default:
return -ENXIO;
@@ -662,6 +1012,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -671,6 +1031,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -678,22 +1041,48 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
-static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret = 0;
uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
- ret = _kvm_get_cpucfg_mask(attr->attr, &val);
- if (ret)
- return ret;
+ switch (attr->attr) {
+ case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
+ ret = _kvm_get_cpucfg_mask(attr->attr, &val);
+ if (ret)
+ return ret;
+ break;
+ case CPUCFG_KVM_FEATURE:
+ val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+ break;
+ default:
+ return -ENXIO;
+ }
put_user(val, uaddr);
return ret;
}
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 gpa;
+ u64 __user *user = (u64 __user *)attr->addr;
+
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ gpa = vcpu->arch.st.guest_addr;
+ if (put_user(gpa, user))
+ return -EFAULT;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -701,7 +1090,10 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_LOONGARCH_VCPU_CPUCFG:
- ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+ ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+ break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
break;
default:
break;
@@ -713,7 +1105,65 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- return -ENXIO;
+ u64 val, valid;
+ u64 __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (attr->attr) {
+ case CPUCFG_KVM_FEATURE:
+ if (get_user(val, user))
+ return -EFAULT;
+
+ valid = LOONGARCH_PV_FEAT_MASK;
+ if (val & ~valid)
+ return -EINVAL;
+
+		/* All vCPUs must set the same PV features */
+ if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
+ && ((kvm->arch.pv_features & valid) != val))
+ return -EINVAL;
+ kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int idx, ret = 0;
+ u64 gpa, __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ if (get_user(gpa, user))
+ return -EFAULT;
+
+ if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return -EINVAL;
+
+ if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+ vcpu->arch.st.guest_addr = gpa;
+ return 0;
+ }
+
+ /* Check the address is in a valid memslot */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+ ret = -EINVAL;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (!ret) {
+ vcpu->arch.st.guest_addr = gpa;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+
+ return ret;
}
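
From userspace, the steal-time area is registered through the vCPU device-attr interface added above. A hedged sketch (fd handling elided; the fallback KVM_STEAL_PHYS_VALID definition is an assumption for illustration):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_STEAL_PHYS_VALID
#define KVM_STEAL_PHYS_VALID	(1ULL << 0)	/* assumption: valid bit is bit 0 */
#endif

static int enable_pvtime(int vcpu_fd, uint64_t guest_pa)
{
	uint64_t gpa = guest_pa | KVM_STEAL_PHYS_VALID;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr  = (uint64_t)(unsigned long)&gpa,
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* steal time not supported */

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
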
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
@@ -725,6 +1175,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -829,12 +1282,68 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return 0;
}
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -EINVAL;
+
+ preempt_disable();
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
+ set_csr_euen(CSR_EUEN_LBTEN);
+ _restore_lbt(&vcpu->arch.lbt);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ }
+ preempt_enable();
+
+ return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+ _save_lbt(&vcpu->arch.lbt);
+ clear_csr_euen(CSR_EUEN_LBTEN);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+ }
+ preempt_enable();
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+	/*
+	 * If TM is enabled, saving/restoring the TOP register will
+	 * trigger an LBT exception, so enable LBT in advance.
+	 */
+ if (fcsr & FPU_CSR_TM)
+ kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
+ return;
+ kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
+ }
+}
+#else
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
+static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
+#endif
+
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- /* Enable FPU */
+	/*
+	 * Enable the FPU for the guest; if TM is enabled in the guest
+	 * FCSR, LBT must be enabled first (see kvm_check_fcsr()).
+	 */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN);
kvm_restore_fpu(&vcpu->arch.fpu);
@@ -854,6 +1363,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
preempt_disable();
/* Enable LSX for guest */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
case KVM_LARCH_FPU:
@@ -888,6 +1398,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
preempt_disable();
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
case KVM_LARCH_LSX:
@@ -919,6 +1430,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
+ kvm_check_fcsr_alive(vcpu);
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
kvm_save_lasx(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -941,6 +1453,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable FPU */
clear_csr_euen(CSR_EUEN_FPEN);
}
+ kvm_lose_lbt(vcpu);
preempt_enable();
}
@@ -963,8 +1476,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
return 0;
}
-long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct kvm_vcpu *vcpu = filp->private_data;
@@ -994,9 +1507,19 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct loongarch_csrs *csr;
vcpu->arch.vpid = 0;
+ vcpu->arch.flush_gpa = INVALID_GPA;
- hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
+ hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_HARD);
+
+	/* Get the physical address of the PGD for the KVM hypervisor */
+ vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
+
+	/*
+	 * Get the PGD of the primary mmu; a virtual address is used, since
+	 * memory is accessed after loading from CSR_PGD in the TLB exception
+	 * fast path.
+	 */
+ vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
vcpu->arch.handle_exit = kvm_handle_exit;
vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
@@ -1013,6 +1536,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Init */
vcpu->arch.last_sched_cpu = -1;
+ /* Init ipi_state lock */
+ spin_lock_init(&vcpu->arch.ipi_state.lock);
+
/*
* Initialize guest register state to valid architectural reset state.
*/
@@ -1081,9 +1607,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Restore timer state regardless */
kvm_restore_timer(vcpu);
-
- /* Control guest page CCA attribute */
- change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
@@ -1136,6 +1660,12 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
+ if (cpu_has_msgint) {
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
+ }
/* Restore Root.GINTC from unused Guest.GINTC register */
write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
@@ -1225,6 +1755,12 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
+ if (cpu_has_msgint) {
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
+ }
vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
@@ -1261,12 +1797,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mmio_needed = 0;
}
- if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_HYPERCALL:
+ kvm_complete_user_service(vcpu, run);
+ break;
+ case KVM_EXIT_LOONGARCH_IOCSR:
if (!run->iocsr_io.is_write)
kvm_complete_iocsr_read(vcpu, run);
+ break;
}
- if (run->immediate_exit)
+ if (!vcpu->wants_to_run)
return r;
/* Clear exit_reason */
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index 6b2e4f66ad26..194ccbcdc3b3 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -5,6 +5,10 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_vcpu.h>
+#include <asm/kvm_csr.h>
+#include <asm/kvm_eiointc.h>
+#include <asm/kvm_pch_pic.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
@@ -21,6 +25,23 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
+static void kvm_vm_init_features(struct kvm *kvm)
+{
+ unsigned long val;
+
+ val = read_csr_gcfg();
+ if (val & CSR_GCFG_GPMP)
+ kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
+
+	/* Enable all supported PV features by default */
+ kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
+	kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
+ if (kvm_pvtime_supported()) {
+ kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+ kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
+ }
+}
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int i;
@@ -39,7 +60,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.phyid_map_lock);
kvm_init_vmcs(kvm);
- kvm->arch.gpa_size = BIT(cpu_vabits - 1);
+ kvm_vm_init_features(kvm);
+
+	/*
+	 * cpu_vabits covers the user address space only (half of the total
+	 * virtual address space); the GPA size of a VM is the same as the
+	 * size of the user address space.
+	 */
+ kvm->arch.gpa_size = BIT(cpu_vabits);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
@@ -69,6 +96,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
+ case KVM_CAP_IRQCHIP:
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_READONLY_MEM:
@@ -99,7 +127,89 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VM_FEAT_LSX:
+ if (cpu_has_lsx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_LASX:
+ if (cpu_has_lasx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_X86BT:
+ if (cpu_has_lbt_x86)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_ARMBT:
+ if (cpu_has_lbt_arm)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_MIPSBT:
+ if (cpu_has_lbt_mips)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PTW:
+ if (cpu_has_ptw)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_MSGINT:
+ if (cpu_has_msgint)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PMU:
+ case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+ case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
+ if (kvm_vm_support(&kvm->arch, attr->attr))
+ return 0;
+ return -ENXIO;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_LOONGARCH_VM_FEAT_CTRL:
+ return kvm_vm_feature_has_attr(kvm, attr);
+ default:
+ return -ENXIO;
+ }
+}
+
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ void __user *argp = (void __user *)arg;
+ struct kvm *kvm = filp->private_data;
+ struct kvm_device_attr attr;
+
+ switch (ioctl) {
+ case KVM_CREATE_IRQCHIP:
+ return 0;
+ case KVM_HAS_DEVICE_ATTR:
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+
+ return kvm_vm_has_attr(kvm, &attr);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
+{
+ if (!kvm_arch_irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+ irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ irq_event->irq, irq_event->level, line_status);
+
+ return 0;
+}
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+ return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
}
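
Note that kvm_arch_irqchip_in_kernel() only reports true once the ipi, eiointc and pch_pic devices have all been created (via KVM_CREATE_DEVICE; KVM_CREATE_IRQCHIP itself is accepted as a no-op above). A userspace sketch of driving an input line afterwards, with illustrative numbers:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assert_irq_line(int vm_fd, unsigned int irq)
{
	struct kvm_irq_level line = { .irq = irq, .level = 1 };

	/* Fails with -ENXIO until the in-kernel irqchip is fully created. */
	return ioctl(vm_fd, KVM_IRQ_LINE, &line);
}
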
diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
index a5e84b403c3b..bcc9d01d8c41 100644
--- a/arch/loongarch/lib/csum.c
+++ b/arch/loongarch/lib/csum.c
@@ -2,6 +2,7 @@
// Copyright (C) 2019-2020 Arm Ltd.
#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>
@@ -25,7 +26,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
- if (unlikely(len == 0))
+ if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index e4d1e581dbae..278be2c8fc36 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y += init.o cache.o tlb.o tlbex.o extable.o \
- fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+ fault.o ioremap.o maccess.o mmap.o pgtable.o \
+ page.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 97b40defde06..2c93d33356e5 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -31,11 +31,52 @@
int show_unhandled_signals = 1;
+static int __kprobes spurious_fault(unsigned long write, unsigned long address)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!(address & __UA_LIMIT))
+ return 0;
+
+ pgd = pgd_offset_k(address);
+ if (!pgd_present(pgdp_get(pgd)))
+ return 0;
+
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(p4dp_get(p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, address);
+ if (!pud_present(pudp_get(pud)))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(pmdp_get(pmd)))
+ return 0;
+
+ if (pmd_leaf(*pmd)) {
+ return write ? pmd_write(pmdp_get(pmd)) : 1;
+ } else {
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(ptep_get(pte)))
+ return 0;
+
+ return write ? pte_write(ptep_get(pte)) : 1;
+ }
+}
+
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
+ if (spurious_fault(write, address))
+ return;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -174,6 +215,58 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+ if (!(flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, address);
+ if (!vma)
+ goto lock_mmap;
+
+ if (write) {
+ flags |= FAULT_FLAG_WRITE;
+ if (!(vma->vm_flags & VM_WRITE)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ }
+
+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ return;
+ }
+lock_mmap:
+
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
@@ -235,8 +328,10 @@ good_area:
*/
goto retry;
}
+ mmap_read_unlock(mm);
+
+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
- mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, write, address);
return;
@@ -249,8 +344,6 @@ good_area:
}
BUG();
}
-
- mmap_read_unlock(mm);
}
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
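
The hunk above adds the per-VMA-lock fast path in front of the existing mmap_lock slow path. Condensed to its core shape (access checks and accounting elided; a sketch, not the exact flow):

static vm_fault_t fault_fast_path(struct mm_struct *mm, unsigned long addr,
				  unsigned int flags, struct pt_regs *regs)
{
	vm_fault_t fault;
	struct vm_area_struct *vma;

	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		return VM_FAULT_RETRY;	/* caller falls back to mmap_lock */

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	return fault;
}
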
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index 12222c56cb59..02dad4624fe3 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -39,15 +39,16 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
+ if (pgd_present(pgdp_get(pgd))) {
p4d = p4d_offset(pgd, addr);
- if (p4d_present(*p4d)) {
+ if (p4d_present(p4dp_get(p4d))) {
pud = pud_offset(p4d, addr);
- if (pud_present(*pud))
+ if (pud_present(pudp_get(pud)))
pmd = pmd_offset(pud, addr);
}
}
- return (pte_t *) pmd;
+
+ return (!pmd || pmd_none(pmdp_get(pmd))) ? NULL : (pte_t *) pmd;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index bf789d114c2d..6bfd4b8dad1b 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -60,14 +60,10 @@ int __ref page_is_ram(unsigned long pfn)
return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}
-#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
@@ -76,15 +72,6 @@ void __init paging_init(void)
free_area_init(max_zone_pfns);
}
-void __init mem_init(void)
-{
- max_mapnr = max_low_pfn;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-}
-#endif /* !CONFIG_NUMA */
-
void __ref free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
@@ -117,14 +104,6 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
page += vmem_altmap_offset(altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
-
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 start)
-{
- return pa_to_nid(start);
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -141,7 +120,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
- int huge = pmd_val(*pmd) & _PAGE_HUGE;
+ int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;
if (huge)
vmemmap_verify((pte_t *)pmd, node, addr, next);
@@ -173,10 +152,8 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pud_t *pud;
pmd_t *pmd;
- if (p4d_none(*p4d)) {
- pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pud)
- panic("%s: Failed to allocate memory\n", __func__);
+ if (p4d_none(p4dp_get(p4d))) {
+ pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(pud);
@@ -184,10 +161,8 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pmd)
- panic("%s: Failed to allocate memory\n", __func__);
+ if (pud_none(pudp_get(pud))) {
+ pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(pmd);
@@ -195,13 +170,12 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
+ if (!pmd_present(pmdp_get(pmd))) {
pte_t *pte;
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate memory\n", __func__);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
+ kernel_pte_init(pte);
}
return pte_offset_kernel(pmd, addr);
@@ -216,7 +190,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = populate_kernel_pte(addr);
- if (!pte_none(*ptep)) {
+ if (!pte_none(ptep_get(ptep))) {
pte_ERROR(*ptep);
return;
}
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
index 70ca73019811..27c336959fe8 100644
--- a/arch/loongarch/mm/ioremap.c
+++ b/arch/loongarch/mm/ioremap.c
@@ -6,7 +6,7 @@
#include <asm/io.h>
#include <asm-generic/early_ioremap.h>
-void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size)
{
return ((void __iomem *)TO_CACHE(phys_addr));
}
@@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
}
-void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
return early_memremap(phys_addr, size);
}
-void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
return early_memremap(phys_addr, size);
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index c608adc99845..170da98ad4f5 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -13,6 +13,13 @@
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+#ifdef __PAGETABLE_P4D_FOLDED
+#define __pgd_none(early, pgd) (0)
+#else
+#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
+(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
+#endif
+
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
@@ -33,11 +40,9 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
-bool kasan_early_stage = true;
-
void *kasan_mem_to_shadow(const void *addr)
{
- if (!kasan_arch_is_ready()) {
+ if (!kasan_enabled()) {
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
@@ -55,6 +60,9 @@ void *kasan_mem_to_shadow(const void *addr)
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
+ case XKPRANGE_WC_SEG:
+ offset = XKPRANGE_WC_SHADOW_OFFSET;
+ break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
@@ -79,6 +87,8 @@ const void *kasan_shadow_to_mem(const void *shadow_addr)
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
@@ -105,7 +115,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
- if (__pmd_none(early, READ_ONCE(*pmdp))) {
+ if (__pmd_none(early, pmdp_get(pmdp))) {
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -118,7 +128,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
- if (__pud_none(early, READ_ONCE(*pudp))) {
+ if (__pud_none(early, pudp_get(pudp))) {
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -131,7 +141,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
- if (__p4d_none(early, READ_ONCE(*p4dp))) {
+ if (__p4d_none(early, p4dp_get(p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
if (!early)
@@ -142,6 +152,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
return pud_offset(p4dp, addr);
}
+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
+{
+ if (__pgd_none(early, pgdp_get(pgdp))) {
+ phys_addr_t p4d_phys = early ?
+ __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
+ pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
+ }
+
+ return p4d_offset(pgdp, addr);
+}
+
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
@@ -154,7 +177,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
: kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
- } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+ } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@@ -166,7 +189,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, node, early);
- } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+ } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
@@ -178,19 +201,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
do {
next = pud_addr_end(addr, end);
kasan_pmd_populate(pudp, addr, next, node, early);
- } while (pudp++, addr = next, addr != end);
+ } while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
do {
next = p4d_addr_end(addr, end);
kasan_pud_populate(p4dp, addr, next, node, early);
- } while (p4dp++, addr = next, addr != end);
+ } while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +241,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +256,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
* swapper_pg_dir. pgd_clear() can't be used
* here because it's nop on 2,3-level pagetable setups
*/
- for (; start < end; start += PGDIR_SIZE)
+ for (; start < end; start = pgd_addr_end(start, end))
kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}
@@ -243,6 +266,17 @@ void __init kasan_init(void)
phys_addr_t pa_start, pa_end;
/*
+	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+	 * overflow UINTPTR_MAX and then look like a user-space address.
+ * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+ * large for Loongson-2K series whose cpu_vabits = 39.
+ */
+ if (KASAN_SHADOW_END < vm_map_base) {
+ pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+ return;
+ }
+
+ /*
* PGD was populated as invalid_pmd_table or invalid_pud_table
* in pagetable_init() which depends on how many levels of page
* table you are using, but we had to clean the gpd of kasan
@@ -262,7 +296,8 @@ void __init kasan_init(void)
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)KFENCE_AREA_END));
- kasan_early_stage = false;
+ /* Enable KASAN here before kasan_mem_to_shadow(). */
+ kasan_init_generic();
/* Populate the linear mapping */
for_each_mem_range(i, &pa_start, &pa_end) {
@@ -293,5 +328,4 @@ void __init kasan_init(void)
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized.\n");
}
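
kasan_mem_to_shadow() maps each 8-byte granule of memory to one shadow byte: shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + a per-segment offset, where the segment (CC/UC/WC/VC) selects the offset. A round-trip sketch for a single segment, with placeholder constants:

#define SCALE_SHIFT	3	/* 8 bytes of memory per shadow byte */

static unsigned long to_shadow(unsigned long addr, unsigned long seg_offset)
{
	return (addr >> SCALE_SHIFT) + seg_offset;
}

static unsigned long from_shadow(unsigned long shadow, unsigned long seg_offset)
{
	/* Inverse only up to granule size: the low 3 address bits are lost. */
	return (shadow - seg_offset) << SCALE_SHIFT;
}
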
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 889030985135..1df9e99582cc 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp && is_file_hugepages(filp))
+ info.align_mask = huge_page_mask_align(filp);
+ else
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -89,7 +93,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -101,7 +106,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
new file mode 100644
index 000000000000..f5e910b68229
--- /dev/null
+++ b/arch/loongarch/mm/pageattr.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/memblock.h>
+#include <linux/pagewalk.h>
+#include <linux/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+struct pageattr_masks {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+{
+ unsigned long new_val = val;
+ struct pageattr_masks *masks = walk->private;
+
+ new_val &= ~(pgprot_val(masks->clear_mask));
+ new_val |= (pgprot_val(masks->set_mask));
+
+ return new_val;
+}
+
+static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgd_t val = pgdp_get(pgd);
+
+ if (pgd_leaf(val)) {
+ val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+ set_pgd(pgd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ p4d_t val = p4dp_get(p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+ set_p4d(p4d, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pud_t val = pudp_get(pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+ set_pud(pud, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pmd_t val = pmdp_get(pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+ set_pmd(pmd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t val = ptep_get(pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+
+ return 0;
+}
+
+static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk)
+{
+ return 0;
+}
+
+static const struct mm_walk_ops pageattr_ops = {
+ .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+ .pte_entry = pageattr_pte_entry,
+ .pte_hole = pageattr_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
+{
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+ };
+
+ if (!numpages)
+ return 0;
+
+ mmap_write_lock(&init_mm);
+ ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks);
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+ return ret;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
+}
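
A usage sketch for the helpers above: addresses below vm_map_base are silently ignored (the helpers return 0), and the count is in pages, not bytes. buf is illustrative and must be page-aligned:

static void protect_page_demo(void *buf)
{
	unsigned long addr = (unsigned long)buf;

	set_memory_ro(addr, 1);		/* clears _PAGE_WRITE | _PAGE_DIRTY */
	/* any write through this mapping would now fault */
	set_memory_rw(addr, 1);		/* sets them back */
}
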
+
+bool kernel_page_present(struct page *page)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return memblock_is_memory(__pa(addr));
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(pgdp_get(pgd)))
+ return false;
+ if (pgd_leaf(pgdp_get(pgd)))
+ return true;
+
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(p4dp_get(p4d)))
+ return false;
+ if (p4d_leaf(p4dp_get(p4d)))
+ return true;
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(pudp_get(pud)))
+ return false;
+ if (pud_leaf(pudp_get(pud)))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(pmdp_get(pmd)))
+ return false;
+ if (pmd_leaf(pmdp_get(pmd)))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(ptep_get(pte));
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+}
+
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t set, clear;
+
+ if (addr < vm_map_base)
+ return 0;
+
+ if (valid) {
+ set = PAGE_KERNEL;
+ clear = __pgprot(0);
+ } else {
+ set = __pgprot(0);
+ clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
+ }
+
+ return __set_memory(addr, 1, set, clear);
+}
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index bda018150000..352d9b2e02ab 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -23,11 +23,10 @@ EXPORT_SYMBOL(tlb_virt_to_page);
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *init, *ret = NULL;
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ pgd_t *init, *ret;
- if (ptdesc) {
- ret = (pgd_t *)ptdesc_address(ptdesc);
+ ret = __pgd_alloc(mm, 0);
+ if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
@@ -116,19 +115,30 @@ void pud_init(void *addr)
EXPORT_SYMBOL_GPL(pud_init);
#endif
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
+void kernel_pte_init(void *addr)
{
- pmd_t pmd;
+ unsigned long *p, *end;
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PTE;
- return pmd;
+ do {
+ p[0] = _PAGE_GLOBAL;
+ p[1] = _PAGE_GLOBAL;
+ p[2] = _PAGE_GLOBAL;
+ p[3] = _PAGE_GLOBAL;
+ p[4] = _PAGE_GLOBAL;
+ p += 8;
+ p[-3] = _PAGE_GLOBAL;
+ p[-2] = _PAGE_GLOBAL;
+ p[-1] = _PAGE_GLOBAL;
+ } while (p != end);
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
flush_tlb_all();
}
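
The unrolled loop in kernel_pte_init() above stores eight _PAGE_GLOBAL entries per iteration (five before the pointer bump, three at negative offsets after it). Purely as a sketch of the intent, the straightforward equivalent would be (helper name hypothetical):

/* Illustrative equivalent of kernel_pte_init(). */
void kernel_pte_init_simple(void *addr)
{
	unsigned long *p = addr;
	int i;

	/* Initialize every PTE slot to _PAGE_GLOBAL rather than zero. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		p[i] = _PAGE_GLOBAL;
}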
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 5ac9beb5f093..3b427b319db2 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -289,7 +289,7 @@ static void setup_tlb_handler(int cpu)
/* Avoid lockdep warning */
rcutree_report_cpu_starting(cpu);
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
vec_sz = sizeof(exception_handlers);
if (pcpu_handlers[cpu])
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 7dbefd4ba210..8dc58781b8eb 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -4,13 +4,20 @@
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
+#include <linux/memory.h>
#include "bpf_jit.h"
-#define REG_TCC LOONGARCH_GPR_A6
-#define TCC_SAVED LOONGARCH_GPR_S5
+#define LOONGARCH_MAX_REG_ARGS 8
+
+#define LOONGARCH_LONG_JUMP_NINSNS 5
+#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
-#define SAVE_RA BIT(0)
-#define SAVE_TCC BIT(1)
+#define LOONGARCH_FENTRY_NINSNS 2
+#define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4)
+#define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
+
+#define REG_TCC LOONGARCH_GPR_A6
+#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80)
static const int regmap[] = {
/* return value from in-kernel function, and exit value for eBPF program */
@@ -32,32 +39,57 @@ static const int regmap[] = {
[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
-static void mark_call(struct jit_ctx *ctx)
+static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset)
{
- ctx->flags |= SAVE_RA;
-}
+ const struct bpf_prog *prog = ctx->prog;
+ const bool is_main_prog = !bpf_is_subprog(prog);
-static void mark_tail_call(struct jit_ctx *ctx)
-{
- ctx->flags |= SAVE_TCC;
-}
+ if (is_main_prog) {
+		/*
+		 * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT
+		 * if (REG_TCC > T3)
+		 *	std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+		 * else
+		 *	std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+		 *	REG_TCC = LOONGARCH_GPR_SP + store_offset
+		 *
+		 * std REG_TCC -> LOONGARCH_GPR_SP + store_offset
+		 *
+		 * The purpose of this code is to first push the TCC onto the
+		 * stack, and then push the address of the TCC onto the stack.
+		 * When bpf2bpf and tail calls are used in combination, the
+		 * value in REG_TCC may be either a count or an address, so
+		 * the two cases must be distinguished and handled separately.
+		 */
+ emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ *store_offset -= sizeof(long);
-static bool seen_call(struct jit_ctx *ctx)
-{
- return (ctx->flags & SAVE_RA);
-}
+ emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4);
-static bool seen_tail_call(struct jit_ctx *ctx)
-{
- return (ctx->flags & SAVE_TCC);
-}
+		/*
+		 * If REG_TCC <= MAX_TAIL_CALL_CNT, the value in REG_TCC is
+		 * a count; push the tcc onto the stack.
+		 */
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
-static u8 tail_call_reg(struct jit_ctx *ctx)
-{
- if (seen_call(ctx))
- return TCC_SAVED;
+		/* Put the address of the saved tcc into REG_TCC */
+ emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
- return REG_TCC;
+ emit_uncond_jmp(ctx, 2);
+
+		/*
+		 * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is
+		 * an address; push the tcc_ptr onto the stack.
+		 */
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
+ } else {
+ *store_offset -= sizeof(long);
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
+ }
+
+ /* Push tcc_ptr into stack */
+ *store_offset -= sizeof(long);
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset);
}
/*
@@ -80,6 +112,10 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
* | $s4 |
* +-------------------------+
* | $s5 |
+ * +-------------------------+
+ * | tcc |
+ * +-------------------------+
+ * | tcc_ptr |
* +-------------------------+ <--BPF_REG_FP
* | prog->aux->stack_depth |
* | (optional) |
@@ -88,22 +124,32 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
*/
static void build_prologue(struct jit_ctx *ctx)
{
- int stack_adjust = 0, store_offset, bpf_stack_adjust;
+ int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
+ const struct bpf_prog *prog = ctx->prog;
+ const bool is_main_prog = !bpf_is_subprog(prog);
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
- /* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
+ /* To store ra, fp, s0, s1, s2, s3, s4, s5 */
stack_adjust += sizeof(long) * 8;
+ /* To store tcc and tcc_ptr */
+ stack_adjust += sizeof(long) * 2;
+
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
+ /* Reserve space for the move_imm + jirl instruction */
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn(ctx, nop);
+
/*
- * First instruction initializes the tail call count (TCC).
- * On tail call we skip this instruction, and the TCC is
- * passed in REG_TCC from the caller.
+ * First instruction initializes the tail call count (TCC)
+ * register to zero. On tail call we skip this instruction,
+ * and the TCC is passed in REG_TCC from the caller.
*/
- emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ if (is_main_prog)
+ emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0);
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
@@ -131,18 +177,13 @@ static void build_prologue(struct jit_ctx *ctx)
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
+ prepare_bpf_tail_call_cnt(ctx, &store_offset);
+
emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
if (bpf_stack_adjust)
emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
- /*
- * Program contains calls and tail calls, so REG_TCC need
- * to be saved across calls.
- */
- if (seen_tail_call(ctx) && seen_call(ctx))
- move_reg(ctx, TCC_SAVED, REG_TCC);
-
ctx->stack_size = stack_adjust;
}
@@ -175,19 +216,29 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
+	/*
+	 * The pushes were done in the order tcc then tcc_ptr, so pop the
+	 * tcc_ptr first, followed by the tcc.
+	 */
+ load_offset -= 2 * sizeof(long);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
+
+ load_offset += sizeof(long);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset);
+
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
if (!is_tail_call) {
/* Set return value */
- move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
+ emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
/* Return to the caller */
- emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
} else {
/*
* Call the next bpf prog and skip the first instruction
* of TCC initialization.
*/
- emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6);
}
}
@@ -206,12 +257,10 @@ bool bpf_jit_supports_far_kfunc_call(void)
return true;
}
-/* initialized on the first pass of build_body() */
-static int out_offset = -1;
-static int emit_bpf_tail_call(struct jit_ctx *ctx)
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
{
- int off;
- u8 tcc = tail_call_reg(ctx);
+ int off, tc_ninsn = 0;
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
u8 a1 = LOONGARCH_GPR_A1;
u8 a2 = LOONGARCH_GPR_A2;
u8 t1 = LOONGARCH_GPR_T1;
@@ -220,7 +269,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (tc_ninsn - (cur_offset))
/*
* a0: &ctx
@@ -230,6 +279,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
* if (index >= array->map.max_entries)
* goto out;
*/
+	tc_ninsn = insn ? ctx->offset[insn + 1] - ctx->offset[insn] : ctx->offset[0];
off = offsetof(struct bpf_array, map.max_entries);
emit_insn(ctx, ldwu, t1, a1, off);
/* bgeu $a2, $t1, jmp_offset */
@@ -237,11 +287,15 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
goto toofar;
/*
- * if (--TCC < 0)
- * goto out;
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
+ * goto out;
*/
- emit_insn(ctx, addid, REG_TCC, tcc, -1);
- if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
+ emit_insn(ctx, ldd, t3, REG_TCC, 0);
+ emit_insn(ctx, addid, t3, t3, 1);
+ emit_insn(ctx, std, t3, REG_TCC, 0);
+ emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+ if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0)
goto toofar;
/*
@@ -261,15 +315,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_insn(ctx, ldd, t3, t2, off);
__build_epilogue(ctx, true);
- /* out: */
- if (out_offset == -1)
- out_offset = cur_offset;
- if (cur_offset != out_offset) {
- pr_err_once("tail_call out_offset = %d, expected %d!\n",
- cur_offset, out_offset);
- return -1;
- }
-
return 0;
toofar:
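
The bound check emitted above works through the pointer saved by the prologue rather than a live register. As a C model (illustrative only, with sp and tcc_ptr_off as in the JIT):

/* Illustrative model of the emitted tail-call bound check. */
long *tcc_ptr = *(long **)(sp + tcc_ptr_off);	/* ldd REG_TCC, sp, tcc_ptr_off */
long cnt = *tcc_ptr + 1;			/* ldd t3, REG_TCC, 0; addi.d t3, t3, 1 */

*tcc_ptr = cnt;					/* std t3, REG_TCC, 0 */
if (cnt > MAX_TAIL_CALL_CNT)			/* JSGT t3, t2, jmp_offset */
	goto out;				/* give up on the tail call */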
@@ -461,7 +506,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
u64 func_addr;
bool func_addr_fixed, sign_extend;
int i = insn - ctx->prog->insnsi;
- int ret, jmp_offset;
+ int ret, jmp_offset, tcc_ptr_off;
const u8 code = insn->code;
const u8 cond = BPF_OP(code);
const u8 t1 = LOONGARCH_GPR_T1;
@@ -482,13 +527,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
emit_zext_32(ctx, dst, is32);
break;
case 8:
- move_reg(ctx, t1, src);
- emit_insn(ctx, extwb, dst, t1);
+ emit_insn(ctx, extwb, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case 16:
- move_reg(ctx, t1, src);
- emit_insn(ctx, extwh, dst, t1);
+ emit_insn(ctx, extwh, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case 32:
@@ -897,21 +940,27 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* function call */
case BPF_JMP | BPF_CALL:
- mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size);
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off);
+ }
+
move_addr(ctx, t1, func_addr);
- emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+ emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
- mark_tail_call(ctx);
- if (emit_bpf_tail_call(ctx) < 0)
+ if (emit_bpf_tail_call(ctx, i) < 0)
return -EINVAL;
break;
@@ -930,7 +979,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
@@ -1172,12 +1224,572 @@ static int validate_code(struct jit_ctx *ctx)
return -1;
}
+ return 0;
+}
+
+static int validate_ctx(struct jit_ctx *ctx)
+{
+ if (validate_code(ctx))
+ return -1;
+
if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
return -1;
return 0;
}
+static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
+{
+ if (!target) {
+ pr_err("bpf_jit: jump target address is error\n");
+		pr_err("bpf_jit: invalid jump target address\n");
+ }
+
+ move_imm(ctx, LOONGARCH_GPR_T1, target, false);
+ emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
+
+ return 0;
+}
+
+static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
+{
+ int i;
+ struct jit_ctx ctx;
+
+ ctx.idx = 0;
+ ctx.image = (union loongarch_instruction *)insns;
+
+ if (!target) {
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn((&ctx), nop);
+ return 0;
+ }
+
+ return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+}
+
+static int emit_call(struct jit_ctx *ctx, u64 addr)
+{
+ return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr);
+}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ int ret;
+
+ mutex_lock(&text_mutex);
+ ret = larch_insn_text_copy(dst, src, len);
+ mutex_unlock(&text_mutex);
+
+ return ret ? ERR_PTR(-EINVAL) : dst;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t, void *old_addr,
+ void *new_addr)
+{
+ int ret;
+ bool is_call;
+ u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+ u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+
+	/*
+	 * Only poking BPF text is supported. Since kernel function entry
+	 * is set up by ftrace, we rely on ftrace to poke kernel functions.
+	 */
+ if (!is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ is_call = old_t == BPF_MOD_CALL;
+ ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
+ if (ret)
+ return ret;
+
+ if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
+ return -EFAULT;
+
+ is_call = new_t == BPF_MOD_CALL;
+ ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
+ if (ret)
+ return ret;
+
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
+ ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
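
A hedged usage sketch for bpf_arch_text_poke() (hypothetical caller and addresses): the old sequence is regenerated and compared against the live bytes first, so a stale expectation fails with -EFAULT instead of corrupting the patch site.

/* Hypothetical: install, then remove, a call to tramp at a BPF patch site. */
static int poke_example(void *ip, void *tramp)
{
	int ret;

	/* NOPs -> call tramp */
	ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, BPF_MOD_CALL, NULL, tramp);
	if (ret)
		return ret;

	/* call tramp -> NOPs */
	return bpf_arch_text_poke(ip, BPF_MOD_CALL, BPF_MOD_CALL, tramp, NULL);
}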
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ int i;
+ int ret = 0;
+ u32 *inst;
+
+ inst = kvmalloc(len, GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ for (i = 0; i < (len / sizeof(u32)); i++)
+ inst[i] = INSN_BREAK;
+
+ mutex_lock(&text_mutex);
+ if (larch_insn_text_copy(dst, inst, len))
+ ret = -EINVAL;
+ mutex_unlock(&text_mutex);
+
+ kvfree(inst);
+
+ return ret;
+}
+
+static void store_args(struct jit_ctx *ctx, int nargs, int args_off)
+{
+ int i;
+
+ for (i = 0; i < nargs; i++) {
+ emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
+ args_off -= 8;
+ }
+}
+
+static void restore_args(struct jit_ctx *ctx, int nargs, int args_off)
+{
+ int i;
+
+ for (i = 0; i < nargs; i++) {
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off);
+ args_off -= 8;
+ }
+}
+
+static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
+ int args_off, int retval_off, int run_ctx_off, bool save_ret)
+{
+ int ret;
+ u32 *branch;
+ struct bpf_prog *p = l->link.prog;
+ int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+
+ if (l->cookie) {
+ move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
+ } else {
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off);
+ }
+
+ /* arg1: prog */
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
+ /* arg2: &run_ctx */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off);
+ ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p));
+ if (ret)
+ return ret;
+
+ /* store prog start time */
+ move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0);
+
+ /*
+ * if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ branch = (u32 *)ctx->image + ctx->idx;
+ /* nop reserved for conditional jump */
+ emit_insn(ctx, nop);
+
+ /* arg1: &args_off */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off);
+ if (!p->jited)
+ move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false);
+ ret = emit_call(ctx, (const u64)p->bpf_func);
+ if (ret)
+ return ret;
+
+ if (save_ret) {
+ emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ }
+
+ /* update branch with beqz */
+ if (ctx->image) {
+ int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch;
+ *branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset);
+ }
+
+ /* arg1: prog */
+ move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false);
+ /* arg2: prog start time */
+ move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1);
+ /* arg3: &run_ctx */
+ emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off);
+ ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p));
+
+ return ret;
+}
+
+static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
+ int args_off, int retval_off, int run_ctx_off, u32 **branches)
+{
+ int i;
+
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off);
+ for (i = 0; i < tl->nr_links; i++) {
+ invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off);
+ branches[i] = (u32 *)ctx->image + ctx->idx;
+ emit_insn(ctx, nop);
+ }
+}
+
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+ return bpf_prog_pack_alloc(size, jit_fill_hole);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ bpf_prog_pack_free(image, size);
+}
+
+/*
+ * Sign-extend the register if necessary
+ */
+static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
+{
+ /* ABI requires unsigned char/short to be zero-extended */
+ if (!sign && (size == 1 || size == 2)) {
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ return;
+ }
+
+ switch (size) {
+ case 1:
+ emit_insn(ctx, extwb, rd, rj);
+ break;
+ case 2:
+ emit_insn(ctx, extwh, rd, rj);
+ break;
+ case 4:
+ emit_insn(ctx, addiw, rd, rj, 0);
+ break;
+ case 8:
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ break;
+ default:
+ pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
+ }
+}
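
The cases above correspond to plain C integer conversions (ext.w.b and ext.w.h sign-extend 8 and 16 bits; addi.w rd, rj, 0 sign-extends the low 32 bits). A standalone sketch of the same widening rules (hypothetical helper; in the JIT the unsigned byte/halfword cases are a bare move because the ABI already guarantees their zero-extension):

/* Illustrative C equivalents of sign_extend(). */
static long widen(long v, int size, int sign)
{
	if (!sign && size == 1)
		return (unsigned char)v;	/* already zero-extended per ABI */
	if (!sign && size == 2)
		return (unsigned short)v;	/* already zero-extended per ABI */

	switch (size) {
	case 1: return (signed char)v;		/* ext.w.b */
	case 2: return (short)v;		/* ext.w.h */
	case 4: return (int)v;			/* addi.w rd, rj, 0 */
	default: return v;			/* size 8: plain move */
	}
}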
+
+static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
+ void *func_addr, u32 flags)
+{
+ int i, ret, save_ret;
+ int stack_size, nargs;
+ int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
+ bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
+ void *orig_call = func_addr;
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ u32 **branches = NULL;
+
+ /*
+ * FP + 8 [ RA to parent func ] return address to parent
+ * function
+ * FP + 0 [ FP of parent func ] frame pointer of parent
+ * function
+ * FP - 8 [ T0 to traced func ] return address of traced
+ * function
+ * FP - 16 [ FP of traced func ] frame pointer of traced
+ * function
+ *
+ * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
+ * BPF_TRAMP_F_RET_FENTRY_RET
+ * [ argN ]
+ * [ ... ]
+ * FP - args_off [ arg1 ]
+ *
+ * FP - nargs_off [ regs count ]
+ *
+ * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
+ *
+ * FP - run_ctx_off [ bpf_tramp_run_ctx ]
+ *
+ * FP - sreg_off [ callee saved reg ]
+ *
+ * FP - tcc_ptr_off [ tail_call_cnt_ptr ]
+ */
+
+ if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
+ return -ENOTSUPP;
+
+	/* FIXME: No support for struct arguments */
+ for (i = 0; i < m->nr_args; i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ return -ENOTSUPP;
+ }
+
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
+	/* Room in the trampoline frame to store the return address and frame pointer */
+ stack_size = 16;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret)
+ stack_size += 16; /* Save BPF R0 and A0 */
+
+ retval_off = stack_size;
+
+	/* Room in the trampoline frame to store the args */
+ nargs = m->nr_args;
+ stack_size += nargs * 8;
+ args_off = stack_size;
+
+	/* Room in the trampoline frame to store the number of args */
+ stack_size += 8;
+ nargs_off = stack_size;
+
+	/* Room in the trampoline frame to store the ip address */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ stack_size += 8;
+ ip_off = stack_size;
+ }
+
+	/* Room in the trampoline frame to store struct bpf_tramp_run_ctx */
+ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
+ run_ctx_off = stack_size;
+
+ stack_size += 8;
+ sreg_off = stack_size;
+
+	/* Room in the trampoline frame to store the tail_call_cnt_ptr */
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
+ stack_size += 8;
+ tcc_ptr_off = stack_size;
+ }
+
+ stack_size = round_up(stack_size, 16);
+
+ if (is_struct_ops) {
+		/*
+		 * For a trampoline called directly, only the trampoline's
+		 * own frame needs to be set up.
+		 */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
+ emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
+ } else {
+		/*
+		 * For a trampoline called from a function entry, both the
+		 * frame of the traced function and the frame of the
+		 * trampoline need to be considered.
+		 */
+ /* RA and FP for parent function */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16);
+ emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16);
+
+ /* RA and FP for traced function */
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size);
+ emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size);
+ }
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+	/* callee-saved register S1 is used to pass the prog start time */
+ emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
+
+ /* store ip address of the traced function */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off);
+ }
+
+	/* store the number of args */
+ move_imm(ctx, LOONGARCH_GPR_T1, nargs, false);
+ emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off);
+
+ store_args(ctx, nargs, args_off);
+
+	/* Compute the real entry of the traced function */
+ /* Ftrace jump skips 2 NOP instructions */
+ if (is_kernel_text((unsigned long)orig_call))
+ orig_call += LOONGARCH_FENTRY_NBYTES;
+ /* Direct jump skips 5 NOP instructions */
+ else if (is_bpf_text_address((unsigned long)orig_call))
+ orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
+	/* Module tracing is not supported: it causes kernel lockups */
+ else if (is_module_text_address((unsigned long)orig_call))
+ return -ENOTSUPP;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < fentry->nr_links; i++) {
+ ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off,
+ run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET);
+ if (ret)
+ return ret;
+ }
+ if (fmod_ret->nr_links) {
+ branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL);
+ if (!branches)
+ return -ENOMEM;
+
+ invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches);
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ restore_args(ctx, m->nr_args, args_off);
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+ ret = emit_call(ctx, (const u64)orig_call);
+ if (ret)
+ goto out;
+ emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ im->ip_after_call = ctx->ro_image + ctx->idx;
+ /* Reserve space for the move_imm + jirl instruction */
+ for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+ emit_insn(ctx, nop);
+ }
+
+ for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) {
+ int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i];
+ *branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset);
+ }
+
+ for (i = 0; i < fexit->nr_links; i++) {
+ ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = ctx->ro_image + ctx->idx;
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
+ ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ restore_args(ctx, m->nr_args, args_off);
+
+ if (save_ret) {
+ emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ if (is_struct_ops)
+ sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
+ m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
+ else
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
+ }
+
+ emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
+
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off);
+
+ if (is_struct_ops) {
+ /* trampoline called directly */
+ emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
+
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
+ } else {
+ /* trampoline called from function entry */
+ emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size);
+
+ emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8);
+ emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
+ emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* return to parent function */
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
+ else
+ /* return to traced function */
+ emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
+ }
+
+ ret = ctx->idx;
+out:
+ kfree(branches);
+
+ return ret;
+}
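
A worked example of the layout computed above, for a hypothetical traced function with two u64 arguments and flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_IP_ARG, assuming sizeof(struct bpf_tramp_run_ctx) rounds to 24 (the exact size varies by kernel config):

/* Offsets are relative to FP and grow downward. */
stack_size  = 16;	/* T0/FP of the traced function */
retval_off  = 32;	/* + 16: BPF R0 and A0 */
args_off    = 48;	/* + 2 * 8: saved arguments */
nargs_off   = 56;	/* + 8: argument count */
ip_off      = 64;	/* + 8: traced function address (IP_ARG) */
run_ctx_off = 88;	/* + 24: struct bpf_tramp_run_ctx */
sreg_off    = 96;	/* + 8: callee-saved S1 */
stack_size  = 96;	/* round_up(96, 16): already aligned */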
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
+ void *ro_image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ int ret, size;
+ void *image, *tmp;
+ struct jit_ctx ctx;
+
+ size = ro_image_end - ro_image;
+ image = kvmalloc(size, GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ ctx.image = (union loongarch_instruction *)image;
+ ctx.ro_image = (union loongarch_instruction *)ro_image;
+ ctx.idx = 0;
+
+ jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
+ ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
+ if (ret < 0)
+ goto out;
+
+ if (validate_code(&ctx) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tmp = bpf_arch_text_copy(ro_image, image, size);
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto out;
+ }
+
+out:
+ kvfree(image);
+ return ret < 0 ? ret : size;
+}
+
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ int ret;
+ struct jit_ctx ctx;
+ struct bpf_tramp_image im;
+
+ ctx.image = NULL;
+ ctx.idx = 0;
+
+ ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
+
+ return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
+}
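
Taken together, the trampoline entry points above are used in two passes: a dry run with ctx.image == NULL to size the code, then real emission into a writable scratch buffer that bpf_arch_text_copy() moves into the read-only image. A condensed sketch of the flow the generic BPF core drives (illustrative only, error handling trimmed):

/* Illustrative two-pass flow; the BPF core performs the equivalent steps. */
static int build_tramp(struct bpf_tramp_image *im, const struct btf_func_model *m,
		       u32 flags, struct bpf_tramp_links *tlinks, void *func_addr)
{
	int size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
	void *ro;

	if (size < 0)
		return size;

	ro = arch_alloc_bpf_trampoline(size);
	if (!ro)
		return -ENOMEM;

	return arch_prepare_bpf_trampoline(im, ro, ro + size, m,
					   flags, tlinks, func_addr);
}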
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
bool tmp_blinded = false, extra_pass = false;
@@ -1280,7 +1892,7 @@ skip_init_ctx:
build_epilogue(&ctx);
/* 3. Extra pass to validate JITed code */
- if (validate_code(&ctx)) {
+ if (validate_ctx(&ctx)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
@@ -1334,7 +1946,6 @@ out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
- out_offset = -1;
return prog;
@@ -1346,6 +1957,16 @@ out_free:
goto out_offset;
}
+bool bpf_jit_bypass_spec_v1(void)
+{
+ return true;
+}
+
+bool bpf_jit_bypass_spec_v4(void)
+{
+ return true;
+}
+
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..5697158fd164 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -18,6 +18,7 @@ struct jit_ctx {
u32 *offset;
int num_exentries;
union loongarch_instruction *image;
+ union loongarch_instruction *ro_image;
u32 stack_size;
};
@@ -27,6 +28,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
@@ -303,3 +309,8 @@ static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch
return -EINVAL;
}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index 365f7de771cb..50c9016641a4 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -194,6 +194,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct pci_bus *bus;
struct pci_root_info *info;
+ struct pci_host_bridge *host;
struct acpi_pci_root_ops *root_ops;
int domain = root->segment;
int busnum = root->secondary.start;
@@ -225,6 +226,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
+ kfree(root_ops);
} else {
struct pci_bus *child;
@@ -236,8 +238,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
}
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
index 2726639150bc..d9fc5d520b37 100644
--- a/arch/loongarch/pci/pci.c
+++ b/arch/loongarch/pci/pci.c
@@ -3,7 +3,6 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
-#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/types.h>
@@ -51,11 +50,11 @@ static int __init pcibios_init(void)
*/
lsize = cpu_last_level_cache_line_size();
- BUG_ON(!lsize);
+ if (lsize) {
+ pci_dfl_cache_line_size = lsize >> 2;
- pci_dfl_cache_line_size = lsize >> 2;
-
- pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+ pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+ }
return 0;
}
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index 1e0590542f98..e7b7346592cb 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
+#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void)
{
+ save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void)
{
+ sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
index 3ea8e07aa225..5bbdb9fd76e5 100644
--- a/arch/loongarch/power/platform.c
+++ b/arch/loongarch/power/platform.c
@@ -17,7 +17,7 @@ void enable_gpe_wakeup(void)
if (acpi_gbl_reduced_hardware)
return;
- acpi_enable_all_wakeup_gpes();
+ acpi_hw_enable_all_wakeup_gpes();
}
void enable_pci_wakeup(void)
@@ -34,22 +34,49 @@ void enable_pci_wakeup(void)
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0);
}
+static struct platform_device loongson3_cpufreq_device = {
+ .name = "loongson3_cpufreq",
+ .id = -1,
+};
+
+static int __init loongson_cpufreq_init(void)
+{
+ if (!cpu_has_scalefreq)
+ return -ENODEV;
+
+ return platform_device_register(&loongson3_cpufreq_device);
+}
+
+arch_initcall(loongson_cpufreq_init);
+
+static void default_suspend_addr(void)
+{
+ acpi_enter_sleep_state(ACPI_STATE_S3);
+}
+
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
uint64_t suspend_addr = 0;
- if (acpi_disabled || acpi_gbl_reduced_hardware)
+ if (acpi_disabled)
+ return 0;
+
+ if (!acpi_gbl_reduced_hardware)
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ if (!acpi_sleep_state_supported(ACPI_STATE_S3))
return 0;
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
- pr_err("ACPI S3 is not support!\n");
- return -1;
+ pr_info("ACPI S3 supported with hardware register default\n");
+ loongson_sysconf.suspend_addr = (u64)default_suspend_addr;
+ } else {
+ pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n");
+ loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
}
- loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
#endif
return 0;
}
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index e2fc3b4e31f0..df0865df26fa 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -30,9 +30,6 @@
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
-
- la.pcrel t0, acpi_saved_sp
- st.d sp, t0, 0
.endm
.macro SETUP_WAKEUP
@@ -51,6 +48,7 @@
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
+ addi.d sp, sp, PT_SIZE
.endm
.text
@@ -59,6 +57,10 @@
/* Sleep/wakeup code for Loongson-3 */
SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
+
+ la.pcrel t0, acpi_saved_sp
+ st.d sp, t0, 0
+
bl __flush_cache_all
/* Pass RA and SP to BIOS */
@@ -66,18 +68,14 @@ SYM_FUNC_START(loongarch_suspend_enter)
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
- jirl a0, t0, 0 /* Call BIOS's STR sleep routine */
+ jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
@@ -86,7 +84,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
+
SETUP_WAKEUP
- addi.d sp, sp, PT_SIZE
jr ra
SYM_FUNC_END(loongarch_suspend_enter)
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index d724d46b07c8..c0cc3ca5da9f 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -2,9 +2,10 @@
# Objects to go into the VDSO.
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
-obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o
+obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
+ vgetrandom-chacha.o sigreturn.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@@ -18,7 +19,7 @@ ccflags-vdso := \
cflags-vdso := $(ccflags-vdso) \
-isystem $(shell $(CC) -print-file-name=include) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
- -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
+ -std=gnu11 -fms-extensions -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-asynchronous-unwind-tables) \
$(call cc-option, -fno-stack-protector)
@@ -29,10 +30,13 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 56ad855896de..8ff986499947 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -3,6 +3,9 @@
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <asm/page.h>
+#include <generated/asm-offsets.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
@@ -10,7 +13,8 @@ OUTPUT_ARCH(loongarch)
SECTIONS
{
- PROVIDE(_start = .);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
@@ -62,6 +66,7 @@ VERSION
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
+ __vdso_getrandom;
__vdso_rt_sigreturn;
local: *;
};
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 9e445be39763..5301cd9d0f83 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -19,27 +19,19 @@ static __always_inline int read_cpu_id(void)
return cpu_id;
}
-static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
-{
- return (struct vdso_pcpu_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START * PAGE_SIZE);
-}
-
extern
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
int cpu_id;
- const struct vdso_pcpu_data *data;
cpu_id = read_cpu_id();
if (cpu)
*cpu = cpu_id;
- if (node) {
- data = get_pcpu_data();
- *node = data[cpu_id].node;
- }
+ if (node)
+ *node = vdso_u_arch_data.pdata[cpu_id].node;
return 0;
}
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..c4dd2bab8825
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <linux/linkage.h>
+
+.text
+
+.macro OP_4REG op d0 d1 d2 d3 s0 s1 s2 s3
+ \op \d0, \d0, \s0
+ \op \d1, \d1, \s1
+ \op \d2, \d2, \s2
+ \op \d3, \d3, \s3
+.endm
+
+/*
+ * Very basic LoongArch implementation of ChaCha20. Produces a given positive
+ * number of blocks of output with a nonce of 0, taking an input key and
+ * 8-byte counter. Importantly, it does not spill any secrets to the
+ * stack. Its arguments are:
+ *
+ * a0: output bytes
+ * a1: 32-byte key input
+ * a2: 8-byte counter input/output
+ * a3: number of 64-byte blocks to write to output
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+
+/* We don't need a frame pointer */
+#define s9 fp
+
+#define output a0
+#define key a1
+#define counter a2
+#define nblocks a3
+#define i a4
+#define state0 s0
+#define state1 s1
+#define state2 s2
+#define state3 s3
+#define state4 s4
+#define state5 s5
+#define state6 s6
+#define state7 s7
+#define state8 s8
+#define state9 s9
+#define state10 a5
+#define state11 a6
+#define state12 a7
+#define state13 t0
+#define state14 t1
+#define state15 t2
+#define cnt_lo t3
+#define cnt_hi t4
+#define copy0 t5
+#define copy1 t6
+#define copy2 t7
+#define copy3 t8
+
+/* Packs to be used with OP_4REG */
+#define line0 state0, state1, state2, state3
+#define line1 state4, state5, state6, state7
+#define line2 state8, state9, state10, state11
+#define line3 state12, state13, state14, state15
+
+#define line1_perm state5, state6, state7, state4
+#define line2_perm state10, state11, state8, state9
+#define line3_perm state15, state12, state13, state14
+
+#define copy copy0, copy1, copy2, copy3
+
+#define _16 16, 16, 16, 16
+#define _20 20, 20, 20, 20
+#define _24 24, 24, 24, 24
+#define _25 25, 25, 25, 25
+
+ /*
+	 * The ABI requires s0-s9 to be saved and sp to be 16-byte aligned.
+ * This does not violate the stack-less requirement: no sensitive data
+ * is spilled onto the stack.
+ */
+ PTR_ADDI sp, sp, (-SZREG * 10) & STACK_ALIGN
+ REG_S s0, sp, 0
+ REG_S s1, sp, SZREG
+ REG_S s2, sp, SZREG * 2
+ REG_S s3, sp, SZREG * 3
+ REG_S s4, sp, SZREG * 4
+ REG_S s5, sp, SZREG * 5
+ REG_S s6, sp, SZREG * 6
+ REG_S s7, sp, SZREG * 7
+ REG_S s8, sp, SZREG * 8
+ REG_S s9, sp, SZREG * 9
+
+ li.w copy0, 0x61707865
+ li.w copy1, 0x3320646e
+ li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
+
+ ld.w cnt_lo, counter, 0
+ ld.w cnt_hi, counter, 4
+
+.Lblock:
+ /* state[0,1,2,3] = "expand 32-byte k" */
+ move state0, copy0
+ move state1, copy1
+ move state2, copy2
+ move state3, copy3
+
+ /* state[4,5,..,11] = key */
+ ld.w state4, key, 0
+ ld.w state5, key, 4
+ ld.w state6, key, 8
+ ld.w state7, key, 12
+ ld.w state8, key, 16
+ ld.w state9, key, 20
+ ld.w state10, key, 24
+ ld.w state11, key, 28
+
+ /* state[12,13] = counter */
+ move state12, cnt_lo
+ move state13, cnt_hi
+
+ /* state[14,15] = 0 */
+ move state14, zero
+ move state15, zero
+
+ li.w i, 10
+.Lpermute:
+ /* odd round */
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _16
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _20
+
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _24
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _25
+
+ /* even round */
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _16
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _20
+
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _24
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _25
+
+ addi.w i, i, -1
+ bnez i, .Lpermute
+
+ /* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
+ OP_4REG add.w line0, copy
+ st.w state0, output, 0
+ st.w state1, output, 4
+ st.w state2, output, 8
+ st.w state3, output, 12
+
+ /* from now on state[0,1,2,3] are scratch registers */
+
+ /* state[0,1,2,3] = lo32(key) */
+ ld.w state0, key, 0
+ ld.w state1, key, 4
+ ld.w state2, key, 8
+ ld.w state3, key, 12
+
+ /* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */
+ OP_4REG add.w line1, line0
+ st.w state4, output, 16
+ st.w state5, output, 20
+ st.w state6, output, 24
+ st.w state7, output, 28
+
+ /* state[0,1,2,3] = hi32(key) */
+ ld.w state0, key, 16
+ ld.w state1, key, 20
+ ld.w state2, key, 24
+ ld.w state3, key, 28
+
+ /* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */
+ OP_4REG add.w line2, line0
+ st.w state8, output, 32
+ st.w state9, output, 36
+ st.w state10, output, 40
+ st.w state11, output, 44
+
+ /* output[12,13,14,15] = state[12,13,14,15] + [cnt_lo, cnt_hi, 0, 0] */
+ add.w state12, state12, cnt_lo
+ add.w state13, state13, cnt_hi
+ st.w state12, output, 48
+ st.w state13, output, 52
+ st.w state14, output, 56
+ st.w state15, output, 60
+
+ /* ++counter */
+ addi.w cnt_lo, cnt_lo, 1
+ sltui state0, cnt_lo, 1
+ add.w cnt_hi, cnt_hi, state0
+
+ /* output += 64 */
+ PTR_ADDI output, output, 64
+ /* --nblocks */
+ PTR_ADDI nblocks, nblocks, -1
+ bnez nblocks, .Lblock
+
+ /* counter = [cnt_lo, cnt_hi] */
+ st.w cnt_lo, counter, 0
+ st.w cnt_hi, counter, 4
+
+ /*
+ * Zero out the potentially sensitive regs, in case nothing uses these
+	 * again. At this point copy[0,1,2,3] just contains "expand 32-byte k"
+	 * and state[0,...,9] are in s0-s9, which we'll restore in the
+	 * epilogue, so we only need to zero state[10,...,15].
+ */
+ move state10, zero
+ move state11, zero
+ move state12, zero
+ move state13, zero
+ move state14, zero
+ move state15, zero
+
+ REG_L s0, sp, 0
+ REG_L s1, sp, SZREG
+ REG_L s2, sp, SZREG * 2
+ REG_L s3, sp, SZREG * 3
+ REG_L s4, sp, SZREG * 4
+ REG_L s5, sp, SZREG * 5
+ REG_L s6, sp, SZREG * 6
+ REG_L s7, sp, SZREG * 7
+ REG_L s8, sp, SZREG * 8
+ REG_L s9, sp, SZREG * 9
+ PTR_ADDI sp, sp, -((-SZREG * 10) & STACK_ALIGN)
+
+ jr ra
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
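
For reference, the round structure above is the standard ChaCha quarter round applied to four column (odd round) or diagonal (even round) groups at once; rotri.w rotates right, so the 16/20/24/25 immediates are 32 minus the usual left-rotation amounts 16/12/8/7. A plain C quarter round, as a standalone sketch for comparison:

/* Reference ChaCha20 quarter round (standalone C, for comparison only). */
#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}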
diff --git a/arch/loongarch/vdso/vgetrandom.c b/arch/loongarch/vdso/vgetrandom.c
new file mode 100644
index 000000000000..d5f258ac4a36
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#include <linux/types.h>
+
+ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+}
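
From userspace, __vdso_getrandom is resolved from the vDSO like the other __vdso_* symbols, and the opaque state must be allocated with the parameters the kernel advertises; calling the function with buffer == NULL, len == 0, flags == 0 and opaque_len == ~0UL is the generic vDSO probe that fills a struct vgetrandom_opaque_params. A hedged sketch, assuming the symbol has already been located:

/* Hypothetical userspace caller; fn points at the resolved __vdso_getrandom. */
typedef ssize_t (*vgetrandom_fn)(void *, size_t, unsigned int, void *, size_t);

static ssize_t fill_random(vgetrandom_fn fn, void *state, size_t state_len,
			   void *buf, size_t len)
{
	return fn(buf, len, 0, state, state_len);
}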