Diffstat (limited to 'arch/loongarch')
-rw-r--r--arch/loongarch/Kconfig29
-rw-r--r--arch/loongarch/Makefile11
-rw-r--r--arch/loongarch/boot/dts/loongson-2k0500.dtsi160
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000-ref.dts24
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000.dtsi42
-rw-r--r--arch/loongarch/boot/dts/loongson-2k2000.dtsi60
-rwxr-xr-xarch/loongarch/boot/install.sh56
-rw-r--r--arch/loongarch/configs/loongson3_defconfig29
-rw-r--r--arch/loongarch/include/asm/Kbuild1
-rw-r--r--arch/loongarch/include/asm/acpi.h2
-rw-r--r--arch/loongarch/include/asm/addrspace.h8
-rw-r--r--arch/loongarch/include/asm/alternative-asm.h4
-rw-r--r--arch/loongarch/include/asm/alternative.h4
-rw-r--r--arch/loongarch/include/asm/asm-extable.h6
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h8
-rw-r--r--arch/loongarch/include/asm/asm.h8
-rw-r--r--arch/loongarch/include/asm/cache.h2
-rw-r--r--arch/loongarch/include/asm/cpu.h4
-rw-r--r--arch/loongarch/include/asm/entry-common.h8
-rw-r--r--arch/loongarch/include/asm/fpu.h39
-rw-r--r--arch/loongarch/include/asm/ftrace.h4
-rw-r--r--arch/loongarch/include/asm/gpr-num.h6
-rw-r--r--arch/loongarch/include/asm/hugetlb.h14
-rw-r--r--arch/loongarch/include/asm/irq.h2
-rw-r--r--arch/loongarch/include/asm/irqflags.h20
-rw-r--r--arch/loongarch/include/asm/jump_label.h4
-rw-r--r--arch/loongarch/include/asm/kasan.h2
-rw-r--r--arch/loongarch/include/asm/kvm_host.h14
-rw-r--r--arch/loongarch/include/asm/kvm_vcpu.h2
-rw-r--r--arch/loongarch/include/asm/lbt.h10
-rw-r--r--arch/loongarch/include/asm/loongarch.h20
-rw-r--r--arch/loongarch/include/asm/numa.h14
-rw-r--r--arch/loongarch/include/asm/orc_types.h4
-rw-r--r--arch/loongarch/include/asm/page.h4
-rw-r--r--arch/loongarch/include/asm/pgalloc.h9
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h10
-rw-r--r--arch/loongarch/include/asm/pgtable.h32
-rw-r--r--arch/loongarch/include/asm/prefetch.h2
-rw-r--r--arch/loongarch/include/asm/ptrace.h6
-rw-r--r--arch/loongarch/include/asm/smp.h3
-rw-r--r--arch/loongarch/include/asm/sparsemem.h5
-rw-r--r--arch/loongarch/include/asm/stackframe.h6
-rw-r--r--arch/loongarch/include/asm/stacktrace.h8
-rw-r--r--arch/loongarch/include/asm/syscall.h15
-rw-r--r--arch/loongarch/include/asm/thread_info.h4
-rw-r--r--arch/loongarch/include/asm/topology.h15
-rw-r--r--arch/loongarch/include/asm/types.h2
-rw-r--r--arch/loongarch/include/asm/unwind_hints.h12
-rw-r--r--arch/loongarch/include/asm/uprobes.h1
-rw-r--r--arch/loongarch/include/asm/vdso/arch_data.h4
-rw-r--r--arch/loongarch/include/asm/vdso/getrandom.h6
-rw-r--r--arch/loongarch/include/asm/vdso/gettimeofday.h10
-rw-r--r--arch/loongarch/include/asm/vdso/processor.h4
-rw-r--r--arch/loongarch/include/asm/vdso/vdso.h4
-rw-r--r--arch/loongarch/include/asm/vdso/vsyscall.h4
-rw-r--r--arch/loongarch/kernel/Makefile10
-rw-r--r--arch/loongarch/kernel/acpi.c53
-rw-r--r--arch/loongarch/kernel/alternative.c1
-rw-r--r--arch/loongarch/kernel/efi-header.S4
-rw-r--r--arch/loongarch/kernel/efi.c12
-rw-r--r--arch/loongarch/kernel/elf.c1
-rw-r--r--arch/loongarch/kernel/entry.S25
-rw-r--r--arch/loongarch/kernel/env.c2
-rw-r--r--arch/loongarch/kernel/fpu.S6
-rw-r--r--arch/loongarch/kernel/genex.S7
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/kfpu.c23
-rw-r--r--arch/loongarch/kernel/kgdb.c5
-rw-r--r--arch/loongarch/kernel/lbt.S4
-rw-r--r--arch/loongarch/kernel/numa.c108
-rw-r--r--arch/loongarch/kernel/paravirt.c1
-rw-r--r--arch/loongarch/kernel/perf_event.c3
-rw-r--r--arch/loongarch/kernel/process.c33
-rw-r--r--arch/loongarch/kernel/ptrace.c16
-rw-r--r--arch/loongarch/kernel/setup.c2
-rw-r--r--arch/loongarch/kernel/signal.c21
-rw-r--r--arch/loongarch/kernel/smp.c38
-rw-r--r--arch/loongarch/kernel/time.c4
-rw-r--r--arch/loongarch/kernel/traps.c21
-rw-r--r--arch/loongarch/kernel/unwind_guess.c1
-rw-r--r--arch/loongarch/kernel/unwind_orc.c3
-rw-r--r--arch/loongarch/kernel/unwind_prologue.c1
-rw-r--r--arch/loongarch/kernel/uprobes.c11
-rw-r--r--arch/loongarch/kernel/vdso.c4
-rw-r--r--arch/loongarch/kvm/Makefile2
-rw-r--r--arch/loongarch/kvm/exit.c70
-rw-r--r--arch/loongarch/kvm/intc/eiointc.c618
-rw-r--r--arch/loongarch/kvm/intc/ipi.c32
-rw-r--r--arch/loongarch/kvm/intc/pch_pic.c4
-rw-r--r--arch/loongarch/kvm/interrupt.c25
-rw-r--r--arch/loongarch/kvm/main.c4
-rw-r--r--arch/loongarch/kvm/mmu.c15
-rw-r--r--arch/loongarch/kvm/trace.h14
-rw-r--r--arch/loongarch/kvm/vcpu.c16
-rw-r--r--arch/loongarch/lib/Makefile2
-rw-r--r--arch/loongarch/lib/crc32-loongarch.c135
-rw-r--r--arch/loongarch/lib/csum.c1
-rw-r--r--arch/loongarch/mm/hugetlbpage.c3
-rw-r--r--arch/loongarch/mm/init.c11
-rw-r--r--arch/loongarch/mm/ioremap.c4
-rw-r--r--arch/loongarch/mm/pageattr.c2
-rw-r--r--arch/loongarch/mm/pgtable.c9
-rw-r--r--arch/loongarch/net/bpf_jit.c12
-rw-r--r--arch/loongarch/net/bpf_jit.h5
-rw-r--r--arch/loongarch/pci/acpi.c14
-rw-r--r--arch/loongarch/pci/pci.c1
-rw-r--r--arch/loongarch/power/hibernate.c3
-rw-r--r--arch/loongarch/vdso/Makefile3
-rw-r--r--arch/loongarch/vdso/vgetrandom-chacha.S13
109 files changed, 1062 insertions, 1165 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 687502917ae2..f0abc38c40ac 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -15,7 +15,6 @@ config LOONGARCH
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
- select ARCH_HAS_CRC32
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_FAST_MULTIPLIER
@@ -25,11 +24,11 @@ config LOONGARCH
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PREEMPT_LAZY
- select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -68,10 +67,12 @@ config LOONGARCH
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -117,6 +118,7 @@ config LOONGARCH
select HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP
@@ -140,7 +142,6 @@ config LOONGARCH
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST
select HAVE_FTRACE_GRAPH_FUNC
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
@@ -177,7 +178,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -185,6 +186,7 @@ config LOONGARCH
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ select NUMA_MEMBLKS if NUMA
select OF
select OF_EARLY_FLATTREE
select PCI
@@ -387,8 +389,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-
+ line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
@@ -454,6 +456,15 @@ config SCHED_SMT
Improves scheduler's performance when there are multiple
threads in one physical core.
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places.
+
config SMP
bool "Multi-Processing support"
help
@@ -483,10 +494,10 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
+ int "Maximum number of CPUs (2-2048)"
+ range 2 2048
+ default "2048"
depends on SMP
- default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support.
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 0304eabbe606..b0703a4e02a2 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -181,11 +181,14 @@ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
install:
- $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/$(image-name-y)-$(KERNELRELEASE)
- $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
- $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+ $(call cmd,install)
define archhelp
- echo ' install - install kernel into $(INSTALL_PATH)'
+ echo ' vmlinux.elf - Uncompressed ELF kernel image (arch/loongarch/boot/vmlinux.elf)'
+ echo ' vmlinux.efi - Uncompressed EFI kernel image (arch/loongarch/boot/vmlinux.efi)'
+ echo ' vmlinuz.efi - GZIP/ZSTD-compressed EFI kernel image (arch/loongarch/boot/vmlinuz.efi)'
+ echo ' Default when CONFIG_EFI_ZBOOT=y'
+ echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or install.sh to $$(INSTALL_PATH)'
echo
endef
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 3b38ff8853a7..760c60eebb89 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -169,6 +169,166 @@
interrupts = <3>;
};
+ pwm@1ff5c000 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c000 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c010 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c010 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c020 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c020 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c030 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c030 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c040 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c040 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c050 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c050 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c060 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c060 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c070 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c070 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c080 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c080 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c090 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c090 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0a0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0a0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0b0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0b0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0c0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0c0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0d0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0d0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0e0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0e0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1ff5c0f0 {
+ compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1ff5c0f0 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
gmac0: ethernet@1f020000 {
compatible = "snps,dwmac-3.70a";
reg = <0x0 0x1f020000 0x0 0x10000>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 3514ea78f525..78ea995abf1c 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include "dt-bindings/thermal/thermal.h"
#include "loongson-2k1000.dtsi"
/ {
@@ -38,6 +39,13 @@
linux,cma-default;
};
};
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <255 153 85 25>;
+ pwms = <&pwm1 0 100000 0>;
+ #cooling-cells = <2>;
+ };
};
&gmac0 {
@@ -92,6 +100,22 @@
#size-cells = <0>;
};
+&pwm1 {
+ status = "okay";
+
+ pinctrl-0 = <&pwm1_pins_default>;
+ pinctrl-names = "default";
+};
+
+&cpu_thermal {
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 8dff2aa52417..1da3beb00f0e 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -68,7 +68,7 @@
};
thermal-zones {
- cpu-thermal {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tsensor 0>;
@@ -322,6 +322,46 @@
status = "disabled";
};
+ pwm@1fe22000 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22000 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@1fe22010 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22010 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1fe22020 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22020 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@1fe22030 {
+ compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x1fe22030 0x0 0x10>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pmc: power-management@1fe27000 {
compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x1fe27000 0x0 0x58>;
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index b4ff55a33e90..9e0411f2754c 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -165,6 +165,66 @@
interrupt-parent = <&eiointc>;
};
+ pwm@100a0000 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0000 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0100 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0100 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0200 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0200 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0300 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0300 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0400 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0400 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm@100a0500 {
+ compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm";
+ reg = <0x0 0x100a0500 0x0 0x10>;
+ interrupt-parent = <&pic>;
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_MISC_CLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
rtc0: rtc@100d0100 {
compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc";
reg = <0x0 0x100d0100 0x0 0x100>;
diff --git a/arch/loongarch/boot/install.sh b/arch/loongarch/boot/install.sh
new file mode 100755
index 000000000000..daac197d3315
--- /dev/null
+++ b/arch/loongarch/boot/install.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the LoongArch Linux port
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+
+set -e
+
+case "${2##*/}" in
+vmlinux.elf)
+ echo "Installing uncompressed vmlinux.elf kernel"
+ base=vmlinux
+ ;;
+vmlinux.efi)
+ echo "Installing uncompressed vmlinux.efi kernel"
+ base=vmlinux
+ ;;
+vmlinuz.efi)
+ echo "Installing gzip/zstd compressed vmlinuz.efi kernel"
+ base=vmlinuz
+ ;;
+*)
+ echo "Warning: Unexpected kernel type"
+ exit 1
+ ;;
+esac
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
+
+# Install kernel config file
+if [ -f $4/config-$1 ]; then
+ mv $4/config-$1 $4/config-$1.old
+fi
+cp .config $4/config-$1
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index c9f564e1d4d9..34eaee0384c9 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -24,9 +24,9 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -225,7 +225,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
@@ -665,6 +664,10 @@ CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
@@ -748,6 +751,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -761,6 +765,7 @@ CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -778,8 +783,23 @@ CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=y
+CONFIG_SND_HDA_CODEC_HDMI_ATI=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
@@ -843,6 +863,9 @@ CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
@@ -1017,7 +1040,7 @@ CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_ANUBIS=m
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 80ddb5edb845..b04d2cef935f 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -10,5 +10,4 @@ generic-y += user.h
generic-y += ioctl.h
generic-y += mmzone.h
generic-y += statfs.h
-generic-y += param.h
generic-y += text-patching.h
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 313f66f7913a..7376840fa9f7 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -33,7 +33,7 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
-#define MAX_CORE_PIC 256
+#define MAX_CORE_PIC 2048
extern struct list_head acpi_wakeup_device_list;
extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index fe198b473f84..e739dbc6329d 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -18,12 +18,12 @@
/*
* This gives the physical RAM offset.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef PHYS_OFFSET
#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifndef IO_BASE
#define IO_BASE CSR_DMW0_BASE
@@ -66,7 +66,7 @@ extern unsigned long vm_map_base;
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
@@ -85,7 +85,7 @@ extern unsigned long vm_map_base;
/*
* 32/64-bit LoongArch address spaces
*/
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ACAST32_
#define _ACAST64_
#else
diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
index ff3d10ac393f..7dc29bd9b2f0 100644
--- a/arch/loongarch/include/asm/alternative-asm.h
+++ b/arch/loongarch/include/asm/alternative-asm.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_ASM_H
#define _ASM_ALTERNATIVE_ASM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/asm.h>
@@ -77,6 +77,6 @@
.previous
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h
index cee7b29785ab..b5bae21fb3c8 100644
--- a/arch/loongarch/include/asm/alternative.h
+++ b/arch/loongarch/include/asm/alternative.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_H
#define _ASM_ALTERNATIVE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stddef.h>
@@ -106,6 +106,6 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_H */
diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h
index df05005f2b80..d60bdf2e6377 100644
--- a/arch/loongarch/include/asm/asm-extable.h
+++ b/arch/loongarch/include/asm/asm-extable.h
@@ -7,7 +7,7 @@
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_BPF 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
@@ -22,7 +22,7 @@
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#include <linux/bits.h>
#include <linux/stringify.h>
@@ -60,6 +60,6 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ASM_EXTABLE_H */
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index 51f224bcfc65..704066b4f736 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b);
__int128_t __ashrti3(__int128_t a, int b);
__int128_t __lshrti3(__int128_t a, int b);
#endif
+
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs);
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg);
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
index f591b3245def..f018d26fc995 100644
--- a/arch/loongarch/include/asm/asm.h
+++ b/arch/loongarch/include/asm/asm.h
@@ -110,7 +110,7 @@
#define LONG_SRA srai.w
#define LONG_SRAV sra.w
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define LONG .word
#endif
#define LONGSIZE 4
@@ -131,7 +131,7 @@
#define LONG_SRA srai.d
#define LONG_SRAV sra.d
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define LONG .dword
#endif
#define LONGSIZE 8
@@ -158,7 +158,7 @@
#define PTR_SCALESHIFT 2
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define PTR .word
#endif
#define PTRSIZE 4
@@ -181,7 +181,7 @@
#define PTR_SCALESHIFT 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define PTR .dword
#endif
#define PTRSIZE 8
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 98cf4d7b4b0a..dfb982fe8701 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -46,7 +46,7 @@
#define PRID_PRODUCT_MASK 0x0fff
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
enum cpu_type_enum {
CPU_UNKNOWN,
@@ -55,7 +55,7 @@ enum cpu_type_enum {
CPU_LAST
};
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
/*
* ISA Level encodings
diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h
index 0fe2a098ded9..099132980dc9 100644
--- a/arch/loongarch/include/asm/entry-common.h
+++ b/arch/loongarch/include/asm/entry-common.h
@@ -2,12 +2,6 @@
#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
#define ARCH_LOONGARCH_ENTRY_COMMON_H
-#include <linux/sched.h>
-#include <linux/processor.h>
-
-static inline bool on_thread_stack(void)
-{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
-}
+#include <asm/stacktrace.h> /* For on_thread_stack() */
#endif
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
index 3177674228f8..45514f314664 100644
--- a/arch/loongarch/include/asm/fpu.h
+++ b/arch/loongarch/include/asm/fpu.h
@@ -22,22 +22,29 @@
struct sigcontext;
#define kernel_fpu_available() cpu_has_fpu
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
-
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
-
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
+
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
static inline void enable_lsx(void);
static inline void disable_lsx(void);
diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h
index 6e0a99763a9a..f4caaf764f9e 100644
--- a/arch/loongarch/include/asm/ftrace.h
+++ b/arch/loongarch/include/asm/ftrace.h
@@ -14,7 +14,7 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef CONFIG_DYNAMIC_FTRACE
@@ -84,7 +84,7 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h
index 996038da806d..af95b941f48b 100644
--- a/arch/loongarch/include/asm/gpr-num.h
+++ b/arch/loongarch/include/asm/gpr-num.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.equ .L__gpr_num_zero, 0
.irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
@@ -25,7 +25,7 @@
.equ .L__gpr_num_$s\num, 23 + \num
.endr
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
@@ -47,6 +47,6 @@
" .equ .L__gpr_num_$s\\num, 23 + \\num\n" \
" .endr\n" \
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_GPR_NUM_H */
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index 4dc4b3e04225..ab68b594f889 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -10,20 +10,6 @@
uint64_t pmd_to_entrylo(unsigned long pmd_val);
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr,
- unsigned long len)
-{
- unsigned long task_size = STACK_TOP;
-
- if (len > task_size)
- return -ENOMEM;
- if (task_size - len < addr)
- return -EINVAL;
- return 0;
-}
-
#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index a0ca84da8541..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
+#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
index 319a8c616f1f..620163628a7f 100644
--- a/arch/loongarch/include/asm/irqflags.h
+++ b/arch/loongarch/include/asm/irqflags.h
@@ -5,7 +5,7 @@
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <linux/stringify.h>
@@ -14,40 +14,48 @@
static inline void arch_local_irq_enable(void)
{
u32 flags = CSR_CRMD_IE;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
static inline void arch_local_irq_disable(void)
{
u32 flags = 0;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
static inline unsigned long arch_local_irq_save(void)
{
u32 flags = 0;
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
return flags;
}
static inline void arch_local_irq_restore(unsigned long flags)
{
+ register u32 mask asm("t0") = CSR_CRMD_IE;
+
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
- : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
@@ -72,6 +80,6 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
-#endif /* #ifndef __ASSEMBLY__ */
+#endif /* #ifndef __ASSEMBLER__ */
#endif /* _ASM_IRQFLAGS_H */
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 8a924bd69d19..4000c7603d8e 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -7,7 +7,7 @@
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -50,5 +50,5 @@ l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index 7f52bd31b9d4..62f139a9c87d 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <linux/mmzone.h>
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index f457c2662e2f..0cecbd038bb3 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -50,12 +50,6 @@ struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
u64 hugepages;
- u64 ipi_read_exits;
- u64 ipi_write_exits;
- u64 eiointc_read_exits;
- u64 eiointc_write_exits;
- u64 pch_pic_read_exits;
- u64 pch_pic_write_exits;
};
struct kvm_vcpu_stat {
@@ -65,6 +59,12 @@ struct kvm_vcpu_stat {
u64 cpucfg_exits;
u64 signal_exits;
u64 hypercall_exits;
+ u64 ipi_read_exits;
+ u64 ipi_write_exits;
+ u64 eiointc_read_exits;
+ u64 eiointc_write_exits;
+ u64 pch_pic_read_exits;
+ u64 pch_pic_write_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
@@ -301,7 +301,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 2c349f961bfb..f1efd7cfbc20 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -37,7 +37,7 @@
#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
typedef union loongarch_instruction larch_inst;
-typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
index e671978bf552..38566574e562 100644
--- a/arch/loongarch/include/asm/lbt.h
+++ b/arch/loongarch/include/asm/lbt.h
@@ -12,9 +12,13 @@
#include <asm/loongarch.h>
#include <asm/processor.h>
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
static inline int is_lbt_enabled(void)
{
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 52651aa0e583..a0994d226eff 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -9,15 +9,15 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <larchintrin.h>
/* CPUCFG */
#define read_cpucfg(reg) __cpucfg(reg)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* LoongArch Registers */
#define REG_ZERO 0x0
@@ -53,7 +53,7 @@
#define REG_S7 0x1e
#define REG_S8 0x1f
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/* Bit fields for CPUCFG registers */
#define LOONGARCH_CPUCFG0 0x0
@@ -171,7 +171,7 @@
* SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* CSR */
#define csr_read32(reg) __csrrd_w(reg)
@@ -187,7 +187,7 @@
#define iocsr_write32(val, reg) __iocsrwr_w(val, reg)
#define iocsr_write64(val, reg) __iocsrwr_d(val, reg)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* CSR register number */
@@ -411,8 +411,8 @@
/* Config CSR registers */
#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */
-#define CSR_CPUID_COREID_WIDTH 9
-#define CSR_CPUID_COREID _ULCAST_(0x1ff)
+#define CSR_CPUID_COREID_WIDTH 11
+#define CSR_CPUID_COREID _ULCAST_(0x7ff)
#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */
#define CSR_CONF1_VSMAX_SHIFT 12
@@ -1195,7 +1195,7 @@
#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00
#define IOCSR_EXTIOI_VECTOR_NUM 256
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static __always_inline u64 drdtime(void)
{
@@ -1357,7 +1357,7 @@ __BUILD_CSR_OP(tlbidx)
#define clear_csr_estat(val) \
csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/* Generic EntryLo bit definitions */
#define ENTRYLO_V (_ULCAST_(1) << 0)
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index b5f9de9f102e..bbf9f70bd25f 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -22,20 +22,6 @@ extern int numa_off;
extern s16 __cpuid_to_node[CONFIG_NR_CPUS];
extern nodemask_t numa_nodes_parsed __initdata;
-struct numa_memblk {
- u64 start;
- u64 end;
- int nid;
-};
-
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-struct numa_meminfo {
- int nr_blks;
- struct numa_memblk blk[NR_NODE_MEMBLKS];
-};
-
-extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-
extern void __init early_numa_add_cpu(int cpuid, s16 node);
extern void numa_add_cpu(unsigned int cpu);
extern void numa_remove_cpu(unsigned int cpu);
diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h
index caf1f71a1057..d5fa98d1d177 100644
--- a/arch/loongarch/include/asm/orc_types.h
+++ b/arch/loongarch/include/asm/orc_types.h
@@ -34,7 +34,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -53,6 +53,6 @@ struct orc_entry {
unsigned int type:3;
unsigned int signal:1;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index 7368f12b7cb1..a3aaf34fba16 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -15,7 +15,7 @@
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <linux/pfn.h>
@@ -110,6 +110,6 @@ extern int __virt_addr_valid(volatile void *kaddr);
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PAGE_H */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 7211dff8c969..1c63a9d9a6d3 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -55,11 +55,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -72,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 45bfc65a0c9f..2fc3789220ac 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,7 +22,6 @@
#define _PAGE_PFN_SHIFT 12
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
-#define _PAGE_DEVMAP_SHIFT 59
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
@@ -36,7 +35,6 @@
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
-#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -76,8 +74,8 @@
#define __READABLE (_PAGE_VALID)
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
_PAGE_USER | _CACHE_CC)
@@ -92,7 +90,7 @@
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)
@@ -127,6 +125,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index da346733a1da..bd128696e96d 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -55,7 +55,7 @@
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/mm_types.h>
#include <linux/mmzone.h>
@@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -302,7 +301,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
@@ -410,9 +409,6 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); }
-static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
-
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
@@ -426,12 +422,6 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
return false;
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
@@ -547,17 +537,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pmd;
}
-static inline int pmd_devmap(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
- pmd_val(pmd) |= _PAGE_DEVMAP;
- return pmd;
-}
-
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
@@ -613,11 +592,6 @@ static inline long pmd_protnone(pmd_t pmd)
#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pud_devmap(pud) (0)
-#define pgd_devmap(pgd) (0)
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
@@ -625,6 +599,6 @@ static inline long pmd_protnone(pmd_t pmd)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_H */
diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h
index 1672262a5e2e..0b168cdaae9a 100644
--- a/arch/loongarch/include/asm/prefetch.h
+++ b/arch/loongarch/include/asm/prefetch.h
@@ -8,7 +8,7 @@
#define Pref_Load 0
#define Pref_Store 8
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro __pref hint addr
#ifdef CONFIG_CPU_HAS_PREFETCH
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index f3ddaed9ef7f..e5d21e836d99 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
- return arch_irqs_disabled_flags(regs->csr_prmd);
+ return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index b87d1d5e5890..3a47f52959a8 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -25,6 +25,7 @@ extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_llc_shared_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
@@ -38,7 +39,7 @@ int loongson_cpu_disable(void);
void loongson_cpu_die(unsigned int cpu);
#endif
-static inline void plat_smp_setup(void)
+static inline void __init plat_smp_setup(void)
{
loongson_smp_setup();
}
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
index 8d4af6aff8a8..4501efac1a87 100644
--- a/arch/loongarch/include/asm/sparsemem.h
+++ b/arch/loongarch/include/asm/sparsemem.h
@@ -21,11 +21,6 @@
#define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. */
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
-int memory_add_physaddr_to_nid(u64 addr);
-#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-#endif
-
#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS)
#endif /* _LOONGARCH_SPARSEMEM_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 66736837085b..3eda298702b1 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -57,6 +57,12 @@
jirl zero, \temp1, 0xc
.endm
+ .macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ bl stackleak_erase_on_task_stack
+#endif
+ .endm
+
.macro BACKUP_T0T1
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..5c8be156567c 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -30,6 +31,11 @@ bool in_irq_stack(unsigned long stack, struct stack_info *info);
bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info);
int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info);
+static __always_inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
#define STR_LONG_L __stringify(LONG_L)
#define STR_LONG_S __stringify(LONG_S)
#define STR_LONGSIZE __stringify(LONGSIZE)
@@ -43,6 +49,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +87,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
index e286dc58476e..81d2733f7b94 100644
--- a/arch/loongarch/include/asm/syscall.h
+++ b/arch/loongarch/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
return regs->regs[11];
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->regs[11] = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
}
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ memcpy(&regs->regs[5], &args[1], 5 * sizeof(long));
+}
+
static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_LOONGARCH64;
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 4f5a9441754e..9dfa2ef00816 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -10,7 +10,7 @@
#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
@@ -53,7 +53,7 @@ static inline struct thread_info *current_thread_info(void)
register unsigned long current_stack_pointer __asm__("$sp");
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* thread information allocation */
#define THREAD_SIZE SZ_16K
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 50273c9187d0..f06e7ff25bb7 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -19,17 +19,22 @@ extern int pcibus_to_node(struct pci_bus *);
#define cpumask_of_pcibus(bus) (cpu_online_mask)
-extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-void numa_set_distance(int from, int to, int distance);
-
-#define node_distance(from, to) (node_distances[(from)][(to)])
+int __node_distance(int from, int to);
+#define node_distance(from, to) __node_distance(from, to)
#else
#define pcibus_to_node(bus) 0
#endif
#ifdef CONFIG_SMP
+/*
+ * Return the CPUs that share the last level cache.
+ */
+static inline const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return &cpu_llc_shared_map[cpu];
+}
+
#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
#define topology_core_id(cpu) (cpu_data[cpu].core)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h
index baf15a0dcf8b..0edd731f3d6a 100644
--- a/arch/loongarch/include/asm/types.h
+++ b/arch/loongarch/include/asm/types.h
@@ -8,7 +8,7 @@
#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ULCAST_
#define _U64CAST_
#else
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..16c7f7e465a0 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -5,7 +5,7 @@
#include <linux/objtool.h>
#include <asm/orc_types.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
index 99a0d198927f..025fc3f0a102 100644
--- a/arch/loongarch/include/asm/uprobes.h
+++ b/arch/loongarch/include/asm/uprobes.h
@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
- unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;
diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h
index 322d0a5f1c84..395ec223bcbe 100644
--- a/arch/loongarch/include/asm/vdso/arch_data.h
+++ b/arch/loongarch/include/asm/vdso/arch_data.h
@@ -7,7 +7,7 @@
#ifndef _VDSO_ARCH_DATA_H
#define _VDSO_ARCH_DATA_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/vdso.h>
@@ -20,6 +20,6 @@ struct vdso_arch_data {
struct vdso_pcpu_data pdata[NR_CPUS];
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
index 48c43f55b039..2ff05003c6e7 100644
--- a/arch/loongarch/include/asm/vdso/getrandom.h
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -5,7 +5,7 @@
#ifndef __ASM_VDSO_GETRANDOM_H
#define __ASM_VDSO_GETRANDOM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
@@ -20,7 +20,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (buffer), "r" (len), "r" (flags)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
"memory");
@@ -28,6 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
return ret;
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index 88cfcf133116..dcafabca9bb6 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -7,7 +7,7 @@
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
@@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (tv), "r" (tz)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (clkid), "r" (ts)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback(
asm volatile(
" syscall 0\n"
- : "+r" (ret)
+ : "=r" (ret)
: "r" (nr), "r" (clkid), "r" (ts)
: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$t8", "memory");
@@ -89,6 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h
index ef5770b343a0..1e255373b0b8 100644
--- a/arch/loongarch/include/asm/vdso/processor.h
+++ b/arch/loongarch/include/asm/vdso/processor.h
@@ -5,10 +5,10 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define cpu_relax() barrier()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 50c65fb29daf..04bd2d452876 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -7,7 +7,7 @@
#ifndef _ASM_VDSO_VDSO_H
#define _ASM_VDSO_VDSO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/page.h>
@@ -16,6 +16,6 @@
#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 1140b54b4bc8..558eb9dfda52 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -2,13 +2,13 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <vdso/datapage.h>
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 4853e8b04c6f..6f5a4574a911 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -5,7 +5,7 @@
OBJECT_FILES_NON_STANDARD_head.o := y
-extra-y := vmlinux.lds
+always-$(KBUILD_BUILTIN) := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 1120ac2824f6..1367ca759468 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi-bgrt.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -244,22 +245,6 @@ fdt_earlycon:
#ifdef CONFIG_ACPI_NUMA
-static __init int setup_node(int pxm)
-{
- return acpi_map_pxm_to_node(pxm);
-}
-
-void __init numa_set_distance(int from, int to, int distance)
-{
- if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- node_distances[from][to] = distance;
-}
-
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
@@ -280,7 +265,41 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pxm |= (pa->proximity_domain_hi[1] << 16);
pxm |= (pa->proximity_domain_hi[2] << 24);
}
- node = setup_node(pxm);
+ node = acpi_map_pxm_to_node(pxm);
+ if (node < 0) {
+ pr_err("SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ if (pa->apic_id >= CONFIG_NR_CPUS) {
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+ pxm, pa->apic_id, node);
+ return;
+ }
+
+ early_numa_add_cpu(pa->apic_id, node);
+
+ set_cpuid_to_node(pa->apic_id, node);
+ node_set(node, numa_nodes_parsed);
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+void __init
+acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+ if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
+ bad_srat();
+ return;
+ }
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain;
+ node = acpi_map_pxm_to_node(pxm);
if (node < 0) {
pr_err("SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
index 4ad13847e962..0e0c766df1e3 100644
--- a/arch/loongarch/kernel/alternative.c
+++ b/arch/loongarch/kernel/alternative.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index 5f23b85d78ca..ba0bdbf86aa8 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -7,7 +7,7 @@
#include <linux/sizes.h>
.macro __EFI_PE_HEADER
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.Lcoff_header:
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
.short .Lsection_count /* NumberOfSections */
@@ -20,7 +20,7 @@
IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
.Loptional_header:
- .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */
.byte 0x02 /* MajorLinkerVersion */
.byte 0x14 /* MinorLinkerVersion */
.long __inittext_end - .Lefi_header_end /* SizeOfCode */
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index de21e72759ee..860a3bc030e0 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -144,6 +144,18 @@ void __init efi_init(void)
if (efi_memmap_init_early(&data) < 0)
panic("Unable to map EFI memory map.\n");
+ /*
+ * Reserve the physical memory region occupied by the EFI
+ * memory map table (header + descriptors). This is crucial
+ * for kdump, as the kdump kernel relies on this original
+ * memmap passed by the bootloader. Without reservation,
+ * this region could be overwritten by the primary kernel.
+ * Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that
+ * critical boot services code/data regions like this are preserved.
+ */
+ memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size);
+ set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);
+
early_memunmap(tbl, sizeof(*tbl));
}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
index 0fa81ced28dc..3d98c6aa00db 100644
--- a/arch/loongarch/kernel/elf.c
+++ b/arch/loongarch/kernel/elf.c
@@ -6,7 +6,6 @@
#include <linux/binfmts.h>
#include <linux/elf.h>
-#include <linux/export.h>
#include <linux/sched.h>
#include <asm/cpu-features.h>
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 48e7e34e355e..47e1db9a1ce4 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -73,28 +73,29 @@ SYM_CODE_START(handle_syscall)
move a0, sp
bl do_syscall
+ STACKLEAK_ERASE
RESTORE_ALL_AND_RET
SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ bl ret_from_fork
+ STACKLEAK_ERASE
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
-SYM_CODE_START(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_kernel_thread_asm)
UNWIND_HINT_REGS
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, s1
- jirl ra, s0, 0
- move a0, sp
- bl syscall_exit_to_user_mode
+ move a1, sp
+ move a2, s0
+ move a3, s1
+ bl ret_from_kernel_thread
+ STACKLEAK_ERASE
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_kernel_thread)
+SYM_CODE_END(ret_from_kernel_thread_asm)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..27144de5c5fe 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 6ab640101457..28caf416ae36 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
/*
* a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
/*
* a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
/*
* a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
/*
* a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
/*
* a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 4f0912141781..733a7665e434 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -16,6 +16,7 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>
+ .section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of idle interrupt region */
@@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle)
*/
idle 0
/* end of idle interrupt region */
-1: jr ra
+idle_exit:
+ jr ra
SYM_FUNC_END(__arch_cpu_idle)
+ .previous
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
- la_abs t1, 1b
+ la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 506a99a5bbc7..e3865e92a917 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -20,7 +20,7 @@
__HEAD
_head:
- .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */
.org 0x8
.dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index ec5b28e570c9..141b49bd989c 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -4,6 +4,7 @@
*/
#include <linux/cpu.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <asm/fpu.h>
#include <asm/smp.h>
@@ -18,11 +19,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
+static inline void fpregs_lock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ local_bh_enable();
+}
+
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
- preempt_disable();
+ if (!irqs_disabled())
+ fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu));
@@ -73,7 +91,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false);
- preempt_enable();
+ if (!irqs_disabled())
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
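The fpregs_lock()/fpregs_unlock() helpers follow the pattern used on other architectures: on PREEMPT_RT softirqs run in preemptible task context, so disabling preemption is enough, while on non-RT kernels bottom halves must also be kept off the CPU while kernel-mode FPU state is live. Callers keep using the usual bracketed API; a hypothetical, kernel-context (not standalone-runnable) fragment showing the intent:

#include <asm/fpu.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

static void hypothetical_vector_fill(void *dst, unsigned long len)
{
	kernel_fpu_begin();	/* saves live FPU/LSX/LASX state, blocks BH (or preemption on RT) */
	/* ... vector loads/stores using the SIMD registers ... */
	kernel_fpu_end();	/* restores state and re-enables BH/preemption */
}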
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
index 001f061d226a..71678912d24c 100644
--- a/arch/loongarch/kernel/lbt.S
+++ b/arch/loongarch/kernel/lbt.S
@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
/*
* a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
/*
* a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
/*
* a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
li.w a0, 0 # success
jr ra
SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 30a72fd528c0..d6e73e8f9c0b 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -11,6 +11,7 @@
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
+#include <linux/numa_memblks.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
@@ -27,10 +28,6 @@
#include <asm/time.h>
int numa_off;
-unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-EXPORT_SYMBOL(node_distances);
-
-static struct numa_meminfo numa_meminfo;
cpumask_t cpus_on_node[MAX_NUMNODES];
cpumask_t phys_cpus_on_node[MAX_NUMNODES];
EXPORT_SYMBOL(cpus_on_node);
@@ -43,8 +40,6 @@ s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
};
EXPORT_SYMBOL(__cpuid_to_node);
-nodemask_t numa_nodes_parsed __initdata;
-
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
@@ -145,48 +140,6 @@ void numa_remove_cpu(unsigned int cpu)
cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
}
-static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
- struct numa_meminfo *mi)
-{
- /* ignore zero length blks */
- if (start == end)
- return 0;
-
- /* whine about and ignore invalid blks */
- if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- nid, start, end - 1);
- return 0;
- }
-
- if (mi->nr_blks >= NR_NODE_MEMBLKS) {
- pr_err("NUMA: too many memblk ranges\n");
- return -EINVAL;
- }
-
- mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
- mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
- mi->blk[mi->nr_blks].nid = nid;
- mi->nr_blks++;
- return 0;
-}
-
-/**
- * numa_add_memblk - Add one numa_memblk to numa_meminfo
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * Add a new memblk to the default numa_meminfo.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- return numa_add_memblk_to(nid, start, end, &numa_meminfo);
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
@@ -205,18 +158,6 @@ static void __init node_mem_init(unsigned int node)
#ifdef CONFIG_ACPI_NUMA
-static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
-{
- static unsigned long num_physpages;
-
- num_physpages += (size >> PAGE_SHIFT);
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
- node, type, start, size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
- memblock_set_node(start, size, &memblock.memory, node);
-}
-
/*
* add_numamem_region
*
@@ -228,28 +169,21 @@ static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type
*/
static void __init add_numamem_region(u64 start, u64 end, u32 type)
{
- u32 i;
- u64 ofs = start;
+ u32 node = pa_to_nid(start);
+ u64 size = end - start;
+ static unsigned long num_physpages;
if (start >= end) {
pr_debug("Invalid region: %016llx-%016llx\n", start, end);
return;
}
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = &numa_meminfo.blk[i];
-
- if (ofs > mb->end)
- continue;
-
- if (end > mb->end) {
- add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
- ofs = mb->end;
- } else {
- add_node_intersection(mb->nid, ofs, end - ofs, type);
- break;
- }
- }
+ num_physpages += (size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ node, type, start, size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages);
+ memblock_set_node(start, size, &memblock.memory, node);
}
static void __init init_node_memblock(void)
@@ -291,24 +225,6 @@ static void __init init_node_memblock(void)
}
}
-static void __init numa_default_distance(void)
-{
- int row, col;
-
- for (row = 0; row < MAX_NUMNODES; row++)
- for (col = 0; col < MAX_NUMNODES; col++) {
- if (col == row)
- node_distances[row][col] = LOCAL_DISTANCE;
- else
- /* We assume that one node per package here!
- *
- * A SLIT should be used for multiple nodes
- * per package to override default setting.
- */
- node_distances[row][col] = REMOTE_DISTANCE;
- }
-}
-
/*
* fake_numa_init() - For Non-ACPI systems
* Return: 0 on success, -errno on failure.
@@ -333,11 +249,11 @@ int __init init_numa_memory(void)
for (i = 0; i < NR_CPUS; i++)
set_cpuid_to_node(i, NUMA_NO_NODE);
- numa_default_distance();
+ numa_reset_distance();
nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX));
/* Parse SRAT and SLIT if provided by firmware. */
ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index e5a39bbad078..b1b51f920b23 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index f86a4b838dd7..8ad098703488 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
if (!loongarch_pmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- loongarch_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
static irqreturn_t pmu_handle_irq(int irq, void *dev)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 6e58f65455c7..3582f591bab2 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -34,6 +35,7 @@
#include <linux/nmi.h>
#include <asm/asm.h>
+#include <asm/asm-prototypes.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
@@ -47,6 +49,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
+#include <asm/switch_to.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
@@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
+asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
@@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+ struct pt_regs *regs)
+{
+ schedule_tail(prev);
+ syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg)
+{
+ schedule_tail(prev);
+ fn(fn_arg);
+ syscall_exit_to_user_mode(regs);
+}
+
/*
* Copy architecture-specific thread state
*/
@@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = childksp;
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
- p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
- p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->regs[3] = usp;
p->thread.reg03 = (unsigned long) childregs;
- p->thread.reg01 = (unsigned long) ret_from_fork;
- p->thread.sched_ra = (unsigned long) ret_from_fork;
+ p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+ p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
/*
* New tasks lose permission to use the fpu. This accelerates context
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 5e2402cfcab0..8edd0954e55a 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -864,7 +864,7 @@ enum loongarch_regset {
static const struct user_regset loongarch64_regsets[] = {
[REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
.size = sizeof(elf_greg_t),
.align = sizeof(elf_greg_t),
@@ -872,7 +872,7 @@ static const struct user_regset loongarch64_regsets[] = {
.set = gpr_set,
},
[REGSET_FPR] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
@@ -880,7 +880,7 @@ static const struct user_regset loongarch64_regsets[] = {
.set = fpr_set,
},
[REGSET_CPUCFG] = {
- .core_note_type = NT_LOONGARCH_CPUCFG,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_CPUCFG),
.n = 64,
.size = sizeof(u32),
.align = sizeof(u32),
@@ -889,7 +889,7 @@ static const struct user_regset loongarch64_regsets[] = {
},
#ifdef CONFIG_CPU_HAS_LSX
[REGSET_LSX] = {
- .core_note_type = NT_LOONGARCH_LSX,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LSX),
.n = NUM_FPU_REGS,
.size = 16,
.align = 16,
@@ -899,7 +899,7 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_CPU_HAS_LASX
[REGSET_LASX] = {
- .core_note_type = NT_LOONGARCH_LASX,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LASX),
.n = NUM_FPU_REGS,
.size = 32,
.align = 32,
@@ -909,7 +909,7 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_CPU_HAS_LBT
[REGSET_LBT] = {
- .core_note_type = NT_LOONGARCH_LBT,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_LBT),
.n = 5,
.size = sizeof(u64),
.align = sizeof(u64),
@@ -919,7 +919,7 @@ static const struct user_regset loongarch64_regsets[] = {
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
- .core_note_type = NT_LOONGARCH_HW_BREAK,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_HW_BREAK),
.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
@@ -927,7 +927,7 @@ static const struct user_regset loongarch64_regsets[] = {
.set = hw_break_set,
},
[REGSET_HW_WATCH] = {
- .core_note_type = NT_LOONGARCH_HW_WATCH,
+ USER_REGSET_NOTE_TYPE(LOONGARCH_HW_WATCH),
.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index b99fbb388fe0..22b27cd447a1 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -265,7 +265,7 @@ static void __init arch_reserve_crashkernel(void)
return;
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &crash_size, &crash_base, &low_size, &high);
+ &crash_size, &crash_base, &low_size, NULL, &high);
if (ret)
return;
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 7a555b600171..4740cb5b2388 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -51,27 +51,6 @@
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 4b24589c0b56..46036d98da75 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -46,6 +46,10 @@ EXPORT_SYMBOL(__cpu_logical_map);
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
+/* Representing the last level cache shared map of each logical CPU */
+cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_llc_shared_map);
+
/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@@ -63,6 +67,9 @@ EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+/* representing cpus for which llc shared maps can be computed */
+static cpumask_t cpu_llc_shared_setup_map;
+
/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
@@ -102,6 +109,34 @@ static inline void set_cpu_core_map(int cpu)
}
}
+static inline void set_cpu_llc_shared_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);
+
+ for_each_cpu(i, &cpu_llc_shared_setup_map) {
+ if (cpu_to_node(cpu) == cpu_to_node(i)) {
+ cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
+ }
+ }
+}
+
+static inline void clear_cpu_llc_shared_map(int cpu)
+{
+ int i;
+
+ for_each_cpu(i, &cpu_llc_shared_setup_map) {
+ if (cpu_to_node(cpu) == cpu_to_node(i)) {
+ cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
+ cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
+ }
+ }
+
+ cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
+}
+
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -406,6 +441,7 @@ int loongson_cpu_disable(void)
#endif
set_cpu_online(cpu, false);
clear_cpu_sibling_map(cpu);
+ clear_cpu_llc_shared_map(cpu);
calculate_cpu_foreign_map();
local_irq_save(flags);
irq_migrate_all_off_this_cpu();
@@ -572,6 +608,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
loongson_prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
+ set_cpu_llc_shared_map(0);
set_cpu_core_map(0);
calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
@@ -613,6 +650,7 @@ asmlinkage void start_secondary(void)
loongson_init_secondary();
set_cpu_sibling_map(cpu);
+ set_cpu_llc_shared_map(cpu);
set_cpu_core_map(cpu);
notify_cpu_starting(cpu);
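set_cpu_llc_shared_map() simply treats every CPU in the same NUMA node as sharing the last-level cache, taking the pairwise union as CPUs come up (and clearing it symmetrically on hot-unplug). A standalone sketch of that union step, with plain bitmasks standing in for cpumasks and an invented toy topology:

#include <stdio.h>

#define NR_CPUS 8

static int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };	/* hypothetical topology */
static unsigned int llc_shared[NR_CPUS];	/* bit i set => shares LLC with CPU i */

static void set_llc_shared_map(int cpu)
{
	for (int i = 0; i <= cpu; i++) {	/* CPUs brought up so far */
		if (cpu_node[i] == cpu_node[cpu]) {
			llc_shared[cpu] |= 1u << i;
			llc_shared[i]   |= 1u << cpu;
		}
	}
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		set_llc_shared_map(cpu);
	printf("cpu3 shares LLC with mask 0x%02x\n", llc_shared[3]);	/* 0x0f */
	return 0;
}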
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index e2d3bfeb6366..367906b10f81 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -102,7 +102,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
return 0;
}
-static unsigned long __init get_loops_per_jiffy(void)
+static unsigned long get_loops_per_jiffy(void)
{
unsigned long lpj = (unsigned long)const_clock_freq;
@@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
-static long init_offset __nosavedata;
+static long init_offset;
void save_counter(void)
{
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 2ec3106c0da3..3d9be6ca7ec5 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
+#include <linux/export.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
@@ -553,9 +554,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
+ bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -582,7 +584,7 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
#endif
irqentry_exit(regs, state);
@@ -621,12 +623,13 @@ static void bug_handler(struct pt_regs *regs)
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -692,7 +695,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -710,11 +713,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
+ bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -780,7 +784,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -1015,6 +1019,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
+ bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -1024,7 +1029,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1038,7 +1043,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
- if (regs->csr_prmd & CSR_PRMD_PIE)
+ if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
index 98379b7d4147..08d7951b2f60 100644
--- a/arch/loongarch/kernel/unwind_guess.c
+++ b/arch/loongarch/kernel/unwind_guess.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/unwind.h>
+#include <linux/export.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
index d623935a7547..0005be49b056 100644
--- a/arch/loongarch/kernel/unwind_orc.c
+++ b/arch/loongarch/kernel/unwind_orc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/objtool.h>
+#include <linux/export.h>
#include <linux/module.h>
+#include <linux/objtool.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
index 929ae240280a..729e775bd40d 100644
--- a/arch/loongarch/kernel/unwind_prologue.c
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
index 87abc7137b73..6022eb0f71db 100644
--- a/arch/loongarch/kernel/uprobes.c
+++ b/arch/loongarch/kernel/uprobes.c
@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
- user_enable_single_step(current);
return 0;
}
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
-
- if (auprobe->simulate)
- instruction_pointer_set(regs, auprobe->resume_era);
- else
- instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
-
- user_disable_single_step(current);
+ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
return 0;
}
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
- user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
- auprobe->resume_era = regs->csr_era;
return true;
}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index 10cf1608c7b3..7b888d9085a0 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -105,7 +105,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_addr = data_addr + VVAR_SIZE;
vma = _install_special_mapping(mm, vdso_addr, info->size,
- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&info->code_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index f4c8e35c216a..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ea321403644a..2ce41f93b2a4 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -289,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
+ trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
+ trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
er = kvm_handle_csr(vcpu, inst);
break;
case 0x6: /* Cache, Idle and IOCSR GSPR */
@@ -341,7 +343,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
* 2) Execute CACOP/IDLE instructions;
* 3) Access to unimplemented CSRs/IOCSRs.
*/
-static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
+static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
int ret = RESUME_GUEST;
enum emulation_result er = EMULATE_DONE;
@@ -661,7 +663,7 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
return ret;
}
-static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
int ret;
larch_inst inst;
@@ -675,7 +677,7 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return RESUME_GUEST;
}
- ret = kvm_handle_mm_fault(vcpu, badv, write);
+ ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
if (ret) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
@@ -705,14 +707,14 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
return ret;
}
-static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, false);
+ return kvm_handle_rdwr_fault(vcpu, false, ecode);
}
-static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
- return kvm_handle_rdwr_fault(vcpu, true);
+ return kvm_handle_rdwr_fault(vcpu, true, ecode);
}
int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -726,11 +728,12 @@ int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
/**
* kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
-static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
struct kvm_run *run = vcpu->run;
@@ -783,11 +786,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
-static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -798,11 +802,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
/*
* kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
+ * @ecode: Exception code.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
-static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -810,7 +815,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
if (kvm_own_lbt(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
@@ -818,32 +823,25 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
- unsigned int min, cpu, i;
- unsigned long ipi_bitmap;
+ unsigned int min, cpu;
struct kvm_vcpu *dest;
+ DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
+ kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
+ };
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
- for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
- ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
- if (!ipi_bitmap)
+ for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ if (!dest)
continue;
- cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
- while (cpu < BITS_PER_LONG) {
- dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
- cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
- if (!dest)
- continue;
-
- /* Send SWI0 to dest vcpu to emulate IPI interrupt */
- kvm_queue_irq(dest, INT_SWI0);
- kvm_vcpu_kick(dest);
- }
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
}
-
- return 0;
}
/*
@@ -872,7 +870,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
-static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
int ret;
larch_inst inst;
@@ -932,16 +930,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
-static int kvm_fault_ni(struct kvm_vcpu *vcpu)
+static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
- unsigned int ecode, inst;
- unsigned long estat, badv;
+ unsigned int inst;
+ unsigned long badv;
/* Fetch the instruction */
inst = vcpu->arch.badi;
badv = vcpu->arch.badv;
- estat = vcpu->arch.host_estat;
- ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
@@ -966,5 +962,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
- return kvm_fault_tables[fault](vcpu);
+ return kvm_fault_tables[fault](vcpu, fault);
}
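Besides threading the exception code (ecode) through every handler, the rework of kvm_send_pv_ipi() folds the two 64-bit destination arguments into one 128-bit bitmap and walks it with for_each_set_bit(), replacing the nested find_first_bit()/find_next_bit() loops. A standalone sketch of that iteration using plain words instead of the kernel bitmap helpers (register values are made up):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
	/* Two hypothetical register values forming one 128-bit destination mask. */
	unsigned long ipi_bitmap[2] = { 0x0000000000000005UL, 0x0000000000000001UL };
	unsigned int min = 4;	/* lowest vCPU id covered by the mask */

	for (int cpu = 0; cpu < 2 * BITS_PER_LONG; cpu++) {
		if (!(ipi_bitmap[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG))))
			continue;
		printf("send SWI0 to vCPU %u\n", min + cpu);	/* vCPUs 4, 6, 68 */
	}
	return 0;
}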
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
index f39929d7bf8a..a3a12af9ecbf 100644
--- a/arch/loongarch/kvm/intc/eiointc.c
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -9,7 +9,8 @@
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
- int ipnum, cpu, irq_index, irq_mask, irq;
+ int ipnum, cpu, cpuid, irq;
+ struct kvm_vcpu *vcpu;
for (irq = 0; irq < EIOINTC_IRQS; irq++) {
ipnum = s->ipmap.reg_u8[irq / 32];
@@ -17,20 +18,23 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
}
- irq_index = irq / 32;
- irq_mask = BIT(irq & 0x1f);
- cpu = s->coremap.reg_u8[irq];
- if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ cpuid = s->coremap.reg_u8[irq];
+ vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
+ if (!vcpu)
+ continue;
+
+ cpu = vcpu->vcpu_id;
+ if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
}
}
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
- int ipnum, cpu, found, irq_index, irq_mask;
+ int ipnum, cpu, found;
struct kvm_vcpu *vcpu;
struct kvm_interrupt vcpu_irq;
@@ -42,19 +46,16 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
cpu = s->sw_coremap[irq];
vcpu = kvm_get_vcpu(s->kvm, cpu);
- irq_index = irq / 32;
- irq_mask = BIT(irq & 0x1f);
-
if (level) {
/* if not enable return false */
- if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
+ if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
return;
- s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
+ __set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
} else {
- s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ __clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
+ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
}
@@ -66,20 +67,25 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
}
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
- int irq, void *pvalue, u32 len, bool notify)
+ int irq, u64 val, u32 len, bool notify)
{
- int i, cpu;
- u64 val = *(u64 *)pvalue;
+ int i, cpu, cpuid;
+ struct kvm_vcpu *vcpu;
for (i = 0; i < len; i++) {
- cpu = val & 0xff;
+ cpuid = val & 0xff;
val = val >> 8;
if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
- cpu = ffs(cpu) - 1;
- cpu = (cpu >= 4) ? 0 : cpu;
+ cpuid = ffs(cpuid) - 1;
+ cpuid = (cpuid >= 4) ? 0 : cpuid;
}
+ vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
+ if (!vcpu)
+ continue;
+
+ cpu = vcpu->vcpu_id;
if (s->sw_coremap[irq + i] == cpu)
continue;
@@ -99,159 +105,14 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
unsigned long flags;
unsigned long *isr = (unsigned long *)s->isr.reg_u8;
- level ? set_bit(irq, isr) : clear_bit(irq, isr);
spin_lock_irqsave(&s->lock, flags);
+ level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
eiointc_update_irq(s, irq, level);
spin_unlock_irqrestore(&s->lock, flags);
}
-static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s, int index, u8 mask, int level)
-{
- u8 val;
- int irq;
-
- val = mask & s->isr.reg_u8[index];
- irq = ffs(val);
- while (irq != 0) {
- /*
- * enable bit change from 0 to 1,
- * need to update irq by pending bits
- */
- eiointc_update_irq(s, irq - 1 + index * 8, level);
- val &= ~BIT(irq - 1);
- irq = ffs(val);
- }
-}
-
-static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u8 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = offset - EIOINTC_NODETYPE_START;
- data = s->nodetype.reg_u8[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = offset - EIOINTC_IPMAP_START;
- data = s->ipmap.reg_u8[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = offset - EIOINTC_ENABLE_START;
- data = s->enable.reg_u8[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = offset - EIOINTC_BOUNCE_START;
- data = s->bounce.reg_u8[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = offset - EIOINTC_COREISR_START;
- data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = offset - EIOINTC_COREMAP_START;
- data = s->coremap.reg_u8[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u8 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u16 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 1;
- data = s->nodetype.reg_u16[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = (offset - EIOINTC_IPMAP_START) >> 1;
- data = s->ipmap.reg_u16[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 1;
- data = s->enable.reg_u16[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = (offset - EIOINTC_BOUNCE_START) >> 1;
- data = s->bounce.reg_u16[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 1;
- data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = (offset - EIOINTC_COREMAP_START) >> 1;
- data = s->coremap.reg_u16[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u16 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
-{
- int index, ret = 0;
- u32 data = 0;
- gpa_t offset;
-
- offset = addr - EIOINTC_BASE;
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 2;
- data = s->nodetype.reg_u32[index];
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- index = (offset - EIOINTC_IPMAP_START) >> 2;
- data = s->ipmap.reg_u32[index];
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 2;
- data = s->enable.reg_u32[index];
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- index = (offset - EIOINTC_BOUNCE_START) >> 2;
- data = s->bounce.reg_u32[index];
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 2;
- data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- index = (offset - EIOINTC_COREMAP_START) >> 2;
- data = s->coremap.reg_u32[index];
- break;
- default:
- ret = -EINVAL;
- break;
- }
- *(u32 *)val = data;
-
- return ret;
-}
-
-static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
- gpa_t addr, int len, void *val)
+static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
+ gpa_t addr, unsigned long *val)
{
int index, ret = 0;
u64 data = 0;
@@ -287,7 +148,7 @@ static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eioin
ret = -EINVAL;
break;
}
- *(u64 *)val = data;
+ *val = data;
return ret;
}
@@ -297,7 +158,7 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
gpa_t addr, int len, void *val)
{
int ret = -EINVAL;
- unsigned long flags;
+ unsigned long flags, data, offset;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -305,358 +166,120 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- vcpu->kvm->stat.eiointc_read_exits++;
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ offset = addr & 0x7;
+ addr -= offset;
+ vcpu->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
+ ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
+ spin_unlock_irqrestore(&eiointc->lock, flags);
+ if (ret)
+ return ret;
+
+ data = data >> (offset * 8);
switch (len) {
case 1:
- ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s8)data;
break;
case 2:
- ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s16)data;
break;
case 4:
- ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
- break;
- case 8:
- ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
+ *(long *)val = (s32)data;
break;
default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
- }
- spin_unlock_irqrestore(&eiointc->lock, flags);
-
- return ret;
-}
-
-static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int index, irq, bits, ret = 0;
- u8 cpu;
- u8 data, old_data;
- u8 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u8 *)val;
- offset = addr - EIOINTC_BASE;
-
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START);
- s->nodetype.reg_u8[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START);
- s->ipmap.reg_u8[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START);
- old_data = s->enable.reg_u8[index];
- s->enable.reg_u8[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
- eiointc_enable_irq(vcpu, s, index, data, 1);
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
- eiointc_enable_irq(vcpu, s, index, data, 0);
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = offset - EIOINTC_BOUNCE_START;
- s->bounce.reg_u8[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START);
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u8[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq;
- s->coremap.reg_u8[index] = data;
- eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
+ *(long *)val = (long)data;
break;
}
- return ret;
-}
-
-static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int i, index, irq, bits, ret = 0;
- u8 cpu;
- u16 data, old_data;
- u16 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u16 *)val;
- offset = addr - EIOINTC_BASE;
-
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 1;
- s->nodetype.reg_u16[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START) >> 1;
- s->ipmap.reg_u16[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 1;
- old_data = s->enable.reg_u32[index];
- s->enable.reg_u16[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
- index = index << 1;
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index + i, mask, 1);
- }
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index, mask, 0);
- }
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = (offset - EIOINTC_BOUNCE_START) >> 1;
- s->bounce.reg_u16[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 1;
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u16[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 1;
- s->coremap.reg_u16[index] = data;
- eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ return 0;
}
-static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
+static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
+ gpa_t addr, u64 value, u64 field_mask)
{
- int i, index, irq, bits, ret = 0;
+ int index, irq, ret = 0;
u8 cpu;
- u32 data, old_data;
- u32 coreisr, old_coreisr;
+ u64 data, old, mask;
gpa_t offset;
- data = *(u32 *)val;
- offset = addr - EIOINTC_BASE;
+ offset = addr & 7;
+ mask = field_mask << (offset * 8);
+ data = (value & field_mask) << (offset * 8);
- switch (offset) {
- case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
- index = (offset - EIOINTC_NODETYPE_START) >> 2;
- s->nodetype.reg_u32[index] = data;
- break;
- case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
- /*
- * ipmap cannot be set at runtime, can be set only at the beginning
- * of irqchip driver, need not update upper irq level
- */
- index = (offset - EIOINTC_IPMAP_START) >> 2;
- s->ipmap.reg_u32[index] = data;
- break;
- case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
- index = (offset - EIOINTC_ENABLE_START) >> 2;
- old_data = s->enable.reg_u32[index];
- s->enable.reg_u32[index] = data;
- /*
- * 1: enable irq.
- * update irq when isr is set.
- */
- data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
- index = index << 2;
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index + i, mask, 1);
- }
- /*
- * 0: disable irq.
- * update irq when isr is set.
- */
- data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index, mask, 0);
- }
- break;
- case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
- /* do not emulate hw bounced irq routing */
- index = (offset - EIOINTC_BOUNCE_START) >> 2;
- s->bounce.reg_u32[index] = data;
- break;
- case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
- index = (offset - EIOINTC_COREISR_START) >> 2;
- /* use attrs to get current cpu index */
- cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u32[cpu][index];
- /* write 1 to clear interrupt */
- s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
- }
- break;
- case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 2;
- s->coremap.reg_u32[index] = data;
- eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
- struct loongarch_eiointc *s,
- gpa_t addr, int len, const void *val)
-{
- int i, index, irq, bits, ret = 0;
- u8 cpu;
- u64 data, old_data;
- u64 coreisr, old_coreisr;
- gpa_t offset;
-
- data = *(u64 *)val;
+ addr -= offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 3;
- s->nodetype.reg_u64[index] = data;
+ old = s->nodetype.reg_u64[index];
+ s->nodetype.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
- index = (offset - EIOINTC_IPMAP_START) >> 3;
- s->ipmap.reg_u64 = data;
+ old = s->ipmap.reg_u64;
+ s->ipmap.reg_u64 = (old & ~mask) | data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 3;
- old_data = s->enable.reg_u64[index];
- s->enable.reg_u64[index] = data;
+ old = s->enable.reg_u64[index];
+ s->enable.reg_u64[index] = (old & ~mask) | data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
- data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
- index = index << 3;
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index + i, mask, 1);
+ data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 1);
+ data &= ~BIT_ULL(irq);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
- data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
- for (i = 0; i < sizeof(data); i++) {
- u8 mask = (data >> (i * 8)) & 0xff;
- eiointc_enable_irq(vcpu, s, index, mask, 0);
+ data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 3;
- s->bounce.reg_u64[index] = data;
+ old = s->bounce.reg_u64[index];
+ s->bounce.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 3;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
- coreisr = data;
- old_coreisr = s->coreisr.reg_u64[cpu][index];
+ old = s->coreisr.reg_u64[cpu][index];
/* write 1 to clear interrupt */
- s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
- coreisr &= old_coreisr;
- bits = sizeof(data) * 8;
- irq = find_first_bit((void *)&coreisr, bits);
- while (irq < bits) {
- eiointc_update_irq(s, irq + index * bits, 0);
- bitmap_clear((void *)&coreisr, irq, 1);
- irq = find_first_bit((void *)&coreisr, bits);
+ s->coreisr.reg_u64[cpu][index] = old & ~data;
+ data &= old;
+ while (data) {
+ irq = __ffs(data);
+ eiointc_update_irq(s, irq + index * 64, 0);
+ data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
- irq = offset - EIOINTC_COREMAP_START;
- index = irq >> 3;
- s->coremap.reg_u64[index] = data;
- eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
+ index = (offset - EIOINTC_COREMAP_START) >> 3;
+ old = s->coremap.reg_u64[index];
+ s->coremap.reg_u64[index] = (old & ~mask) | data;
+ data = s->coremap.reg_u64[index];
+ eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@@ -671,7 +294,7 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
gpa_t addr, int len, const void *val)
{
int ret = -EINVAL;
- unsigned long flags;
+ unsigned long flags, value;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -679,24 +302,30 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- vcpu->kvm->stat.eiointc_write_exits++;
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
+ vcpu->stat.eiointc_write_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
case 1:
- ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
+ value = *(unsigned char *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
break;
case 2:
- ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
+ value = *(unsigned short *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
break;
case 4:
- ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
- break;
- case 8:
- ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
+ value = *(unsigned int *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
break;
default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
+ value = *(unsigned long *)val;
+ ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
+ break;
}
spin_unlock_irqrestore(&eiointc->lock, flags);
@@ -787,7 +416,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
int ret = 0;
unsigned long flags;
unsigned long type = (unsigned long)attr->attr;
- u32 i, start_irq;
+ u32 i, start_irq, val;
void __user *data;
struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
@@ -795,8 +424,14 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
spin_lock_irqsave(&s->lock, flags);
switch (type) {
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
- if (copy_from_user(&s->num_cpu, data, 4))
+ if (copy_from_user(&val, data, 4))
ret = -EFAULT;
+ else {
+ if (val >= EIOINTC_ROUTE_MAX_VCPUS)
+ ret = -EINVAL;
+ else
+ s->num_cpu = val;
+ }
break;
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
if (copy_from_user(&s->features, data, 4))
@@ -809,7 +444,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
start_irq = i * 4;
eiointc_update_sw_coremap(s, start_irq,
- (void *)&s->coremap.reg_u32[i], sizeof(u32), false);
+ s->coremap.reg_u32[i], sizeof(u32), false);
}
break;
default:
@@ -824,7 +459,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
- int addr, cpuid, offset, ret = 0;
+ int addr, cpu, offset, ret = 0;
unsigned long flags;
void *p = NULL;
void __user *data;
@@ -832,7 +467,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
s = dev->kvm->arch.eiointc;
addr = attr->attr;
- cpuid = addr >> 16;
+ cpu = addr >> 16;
addr &= 0xffff;
data = (void __user *)attr->addr;
switch (addr) {
@@ -857,8 +492,11 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
p = &s->isr.reg_u32[offset];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
+ if (cpu >= s->num_cpu)
+ return -EINVAL;
+
offset = (addr - EIOINTC_COREISR_START) / 4;
- p = &s->coreisr.reg_u32[cpuid][offset];
+ p = &s->coreisr.reg_u32[cpu][offset];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
offset = (addr - EIOINTC_COREMAP_START) / 4;
@@ -899,9 +537,15 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
data = (void __user *)attr->addr;
switch (addr) {
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
+ if (is_write)
+ return ret;
+
p = &s->num_cpu;
break;
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
+ if (is_write)
+ return ret;
+
p = &s->features;
break;
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
@@ -956,7 +600,7 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
int ret;
struct loongarch_eiointc *s;
- struct kvm_io_device *device, *device1;
+ struct kvm_io_device *device;
struct kvm *kvm = dev->kvm;
/* eiointc has been created */
@@ -984,10 +628,10 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
return ret;
}
- device1 = &s->device_vext;
- kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
+ device = &s->device_vext;
+ kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
- EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
+ EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
if (ret < 0) {
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
kfree(s);
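
Illustrative sketch (not part of the patch): the unified loongarch_eiointc_read()/loongarch_eiointc_write() helpers above fold every 1/2/4-byte guest access into the 64-bit backing register. The low three address bits give a byte offset, the field mask shifted to that offset isolates the touched bits for a read-modify-write, and the read path sign-extends the extracted field according to the access length. The standalone C sketch below shows that arithmetic only; demo_reg, demo_read and demo_write are hypothetical names, not kernel code.

/*
 * Standalone sketch of the sub-word access folding used by the new
 * eiointc read/write helpers. Only the mask/shift logic mirrors the patch.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_reg;	/* stands in for one 64-bit eiointc register */

/* Fold a 1/2/4/8-byte write at 'addr' into the aligned 64-bit register. */
static void demo_write(uint64_t addr, uint64_t value, uint64_t field_mask)
{
	unsigned int offset = addr & 7;			/* byte offset inside the doubleword */
	uint64_t mask = field_mask << (offset * 8);	/* bits touched by this access */
	uint64_t data = (value & field_mask) << (offset * 8);

	demo_reg = (demo_reg & ~mask) | data;		/* read-modify-write */
}

/* Read back 'len' bytes at 'addr', sign-extending per access width. */
static long demo_read(uint64_t addr, int len)
{
	unsigned int offset = addr & 7;
	uint64_t data = demo_reg >> (offset * 8);

	switch (len) {
	case 1: return (int8_t)data;
	case 2: return (int16_t)data;
	case 4: return (int32_t)data;
	default: return (long)data;
	}
}

int main(void)
{
	demo_write(0x1c06, 0xabcd, 0xffff);		/* 16-bit store at offset 6 */
	printf("reg=%#llx low16=%#lx\n",
	       (unsigned long long)demo_reg, demo_read(0x1c06, 2) & 0xffff);
	return 0;
}
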
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
index 93f4acd44523..e658d5b37c04 100644
--- a/arch/loongarch/kvm/intc/ipi.c
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (unlikely(ret)) {
- kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
+ kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
return ret;
}
/* Construct the mask by scanning the bit 27-30 */
@@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (unlikely(ret))
- kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
+ kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
return ret;
}
@@ -268,36 +268,16 @@ static int kvm_ipi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
- int ret;
- struct loongarch_ipi *ipi;
-
- ipi = vcpu->kvm->arch.ipi;
- if (!ipi) {
- kvm_err("%s: ipi irqchip not valid!\n", __func__);
- return -EINVAL;
- }
- ipi->kvm->stat.ipi_read_exits++;
- ret = loongarch_ipi_readl(vcpu, addr, len, val);
-
- return ret;
+ vcpu->stat.ipi_read_exits++;
+ return loongarch_ipi_readl(vcpu, addr, len, val);
}
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
- int ret;
- struct loongarch_ipi *ipi;
-
- ipi = vcpu->kvm->arch.ipi;
- if (!ipi) {
- kvm_err("%s: ipi irqchip not valid!\n", __func__);
- return -EINVAL;
- }
- ipi->kvm->stat.ipi_write_exits++;
- ret = loongarch_ipi_writel(vcpu, addr, len, val);
-
- return ret;
+ vcpu->stat.ipi_write_exits++;
+ return loongarch_ipi_writel(vcpu, addr, len, val);
}
static const struct kvm_io_device_ops kvm_ipi_ops = {
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
index 08fce845f668..6f00ffe05c54 100644
--- a/arch/loongarch/kvm/intc/pch_pic.c
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -196,7 +196,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic reading */
- vcpu->kvm->stat.pch_pic_read_exits++;
+ vcpu->stat.pch_pic_read_exits++;
ret = loongarch_pch_pic_read(s, addr, len, val);
return ret;
@@ -303,7 +303,7 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic writing */
- vcpu->kvm->stat.pch_pic_write_exits++;
+ vcpu->stat.pch_pic_write_exits++;
ret = loongarch_pch_pic_write(s, addr, len, val);
return ret;
diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
index 4c3f22de4b40..8462083f0301 100644
--- a/arch/loongarch/kvm/interrupt.c
+++ b/arch/loongarch/kvm/interrupt.c
@@ -83,28 +83,11 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.irq_pending;
unsigned long *pending_clr = &vcpu->arch.irq_clear;
- if (!(*pending) && !(*pending_clr))
- return;
-
- if (*pending_clr) {
- priority = __ffs(*pending_clr);
- while (priority <= INT_IPI) {
- kvm_irq_clear(vcpu, priority);
- priority = find_next_bit(pending_clr,
- BITS_PER_BYTE * sizeof(*pending_clr),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending_clr, INT_IPI + 1)
+ kvm_irq_clear(vcpu, priority);
- if (*pending) {
- priority = __ffs(*pending);
- while (priority <= INT_IPI) {
- kvm_irq_deliver(vcpu, priority);
- priority = find_next_bit(pending,
- BITS_PER_BYTE * sizeof(*pending),
- priority + 1);
- }
- }
+ for_each_set_bit(priority, pending, INT_IPI + 1)
+ kvm_irq_deliver(vcpu, priority);
}
int kvm_pending_timer(struct kvm_vcpu *vcpu)
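
Illustrative sketch (not part of the patch): the rewritten kvm_deliver_intr() replaces the manual __ffs()/find_next_bit() loops with for_each_set_bit() over priorities 0..INT_IPI. A minimal standalone equivalent of that iteration, assuming hypothetical names (demo_deliver, DEMO_INT_IPI as a stand-in bound), looks like this:

/*
 * Minimal sketch of the set-bit iteration performed by for_each_set_bit()
 * in the new kvm_deliver_intr(). Names and the bound value are illustrative.
 */
#include <stdio.h>

#define DEMO_INT_IPI 12			/* stand-in for INT_IPI */

static void demo_deliver(unsigned int priority)
{
	printf("deliver priority %u\n", priority);
}

static void demo_deliver_pending(unsigned long pending)
{
	/* visit each set bit from 0 up to and including DEMO_INT_IPI */
	for (unsigned long word = pending; word; word &= word - 1) {
		unsigned int priority = __builtin_ctzl(word);

		if (priority > DEMO_INT_IPI)
			break;
		demo_deliver(priority);
	}
}

int main(void)
{
	demo_deliver_pending((1UL << 2) | (1UL << 5) | (1UL << 40));
	return 0;	/* delivers priorities 2 and 5; bit 40 is above the bound */
}
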
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index d165cd38c6bb..80ea63d465b8 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void)
/*
* Enable virtualization features granting guest direct control of
* certain features:
- * GCI=2: Trap on init or unimplement cache instruction.
+ * GCI=2: Trap on init or unimplemented cache instruction.
* TORU=0: Trap on Root Unimplement.
* CACTRL=1: Root control cache.
- * TOP=0: Trap on Previlege.
+ * TOP=0: Trap on Privilege.
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 4d203294767c..ed956c5cf2cc 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -912,7 +912,7 @@ out:
return err;
}
-int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode)
{
int ret;
@@ -921,8 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
- vcpu->arch.flush_gpa = gpa;
- kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) {
+ /*
+ * With HW PTW, no invalid TLB entry is installed on a page fault.
+ * But for an EXCCODE_TLBM exception, a stale TLB entry may still
+ * exist because of the last read access.
+ *
+ * With SW PTW, an invalid TLB entry is installed in the TLB refill
+ * exception.
+ */
+ vcpu->arch.flush_gpa = gpa;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ }
return 0;
}
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index 1783397b1bc8..145514dab6d5 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -46,11 +46,15 @@ DEFINE_EVENT(kvm_transition, kvm_out,
/* Further exit reasons */
#define KVM_TRACE_EXIT_IDLE 64
#define KVM_TRACE_EXIT_CACHE 65
+#define KVM_TRACE_EXIT_CPUCFG 66
+#define KVM_TRACE_EXIT_CSR 67
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
{ KVM_TRACE_EXIT_IDLE, "IDLE" }, \
- { KVM_TRACE_EXIT_CACHE, "CACHE" }
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_CPUCFG, "CPUCFG" }, \
+ { KVM_TRACE_EXIT_CSR, "CSR" }
DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -82,6 +86,14 @@ DEFINE_EVENT(kvm_exit, kvm_exit_cache,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
+DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+DEFINE_EVENT(kvm_exit, kvm_exit_csr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
DEFINE_EVENT(kvm_exit, kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 8e427b379661..d1b8c50941ca 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -20,7 +20,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, hypercall_exits)
+ STATS_DESC_COUNTER(VCPU, hypercall_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_read_exits),
+ STATS_DESC_COUNTER(VCPU, ipi_write_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
+ STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
+ STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -294,6 +300,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+ kvm_lose_pmu(vcpu);
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@@ -902,6 +909,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+ /*
+ * When the vCPU is reset, clear the ESTAT and GINTC registers here.
+ * Other CSR registers are cleared by _kvm_setcsr().
+ */
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
break;
default:
ret = -EINVAL;
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index fae77809048b..ccea3bbd4353 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -11,5 +11,3 @@ obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o
obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-
-obj-$(CONFIG_CRC32_ARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c
deleted file mode 100644
index c44ee4f32557..000000000000
--- a/arch/loongarch/lib/crc32-loongarch.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * CRC32 and CRC32C using LoongArch crc* instructions
- *
- * Module based on mips/crypto/crc32-mips.c
- *
- * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
- * Copyright (C) 2018 MIPS Tech, LLC
- * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
- */
-
-#include <asm/cpu-features.h>
-#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/unaligned.h>
-
-#define _CRC32(crc, value, size, type) \
-do { \
- __asm__ __volatile__( \
- #type ".w." #size ".w" " %0, %1, %0\n\t"\
- : "+r" (crc) \
- : "r" (value) \
- : "memory"); \
-} while (0)
-
-#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
-#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
-
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
-
-u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
-{
- if (!static_branch_likely(&have_crc32))
- return crc32_le_base(crc, p, len);
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32(crc, value, b);
- }
-
- return crc;
-}
-EXPORT_SYMBOL(crc32_le_arch);
-
-u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
-{
- if (!static_branch_likely(&have_crc32))
- return crc32c_base(crc, p, len);
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32C(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32C(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32C(crc, value, b);
- }
-
- return crc;
-}
-EXPORT_SYMBOL(crc32c_arch);
-
-u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
-{
- return crc32_be_base(crc, p, len);
-}
-EXPORT_SYMBOL(crc32_be_arch);
-
-static int __init crc32_loongarch_init(void)
-{
- if (cpu_has_crc32)
- static_branch_enable(&have_crc32);
- return 0;
-}
-arch_initcall(crc32_loongarch_init);
-
-static void __exit crc32_loongarch_exit(void)
-{
-}
-module_exit(crc32_loongarch_exit);
-
-u32 crc32_optimizations(void)
-{
- if (static_key_enabled(&have_crc32))
- return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION;
- return 0;
-}
-EXPORT_SYMBOL(crc32_optimizations);
-
-MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
-MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
-MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
index df309ae4045d..bcc9d01d8c41 100644
--- a/arch/loongarch/lib/csum.c
+++ b/arch/loongarch/lib/csum.c
@@ -2,6 +2,7 @@
// Copyright (C) 2019-2020 Arm Ltd.
#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index e4068906143b..02dad4624fe3 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd = pmd_offset(pud, addr);
}
}
- return (pte_t *) pmd;
+
+ return (!pmd || pmd_none(pmdp_get(pmd))) ? NULL : (pte_t *) pmd;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index fdb7f73ad160..c3e4586a7975 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -65,9 +65,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
@@ -109,14 +106,6 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
page += vmem_altmap_offset(altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
-
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 start)
-{
- return pa_to_nid(start);
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
index 70ca73019811..df949a3d0f34 100644
--- a/arch/loongarch/mm/ioremap.c
+++ b/arch/loongarch/mm/ioremap.c
@@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
}
-void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
return early_memremap(phys_addr, size);
}
-void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
return early_memremap(phys_addr, size);
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
index 99165903908a..f5e910b68229 100644
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -118,7 +118,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgp
return 0;
mmap_write_lock(&init_mm);
- ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
+ ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 22a94bb3e6e8..352d9b2e02ab 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -135,15 +135,6 @@ void kernel_pte_init(void *addr)
} while (p != end);
}
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ea357a3edc09..fa1500d4aa3e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
+ else
+ emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
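
Illustrative sketch (not part of the patch): the BPF_LD | BPF_IMM | BPF_DW case above assembles a 64-bit immediate from two consecutive instruction slots, taking the high 32 bits from the second slot and the low 32 bits from the first. A self-contained C sketch of that expression, using a simplified stand-in struct rather than the real struct bpf_insn:

/*
 * Sketch of the LD_IMM64 immediate assembly matching the imm64 expression
 * in the hunk above. demo_bpf_insn and demo_ld_imm64 are illustrative names.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_bpf_insn {
	int32_t imm;		/* each slot carries 32 bits of the constant */
};

static uint64_t demo_ld_imm64(const struct demo_bpf_insn *insn)
{
	/* high half from the second slot, low half from the first */
	return (uint64_t)(insn + 1)->imm << 32 | (uint32_t)insn->imm;
}

int main(void)
{
	struct demo_bpf_insn pair[2] = { { .imm = (int32_t)0xdeadbeef },
					 { .imm = 0x12345678 } };

	/* prints 0x12345678deadbeef */
	printf("imm64 = %#llx\n", (unsigned long long)demo_ld_imm64(pair));
	return 0;
}
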
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..f9c569f53949 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index 1da4dc46df43..50c9016641a4 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -194,6 +194,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct pci_bus *bus;
struct pci_root_info *info;
+ struct pci_host_bridge *host;
struct acpi_pci_root_ops *root_ops;
int domain = root->segment;
int busnum = root->secondary.start;
@@ -237,8 +238,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
}
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
index 2726639150bc..5bc9627a6cf9 100644
--- a/arch/loongarch/pci/pci.c
+++ b/arch/loongarch/pci/pci.c
@@ -3,7 +3,6 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
-#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/types.h>
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index 1e0590542f98..e7b7346592cb 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
+#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void)
{
+ save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void)
{
+ sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 1c26147aff70..ccd2c5e135c6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -36,8 +36,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index c2733e6c3a8d..c4dd2bab8825 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -58,9 +58,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
#define copy0 t5
#define copy1 t6
#define copy2 t7
-
-/* Reuse i as copy3 */
-#define copy3 i
+#define copy3 t8
/* Packs to be used with OP_4REG */
#define line0 state0, state1, state2, state3
@@ -99,6 +97,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy0, 0x61707865
li.w copy1, 0x3320646e
li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
ld.w cnt_lo, counter, 0
ld.w cnt_hi, counter, 4
@@ -108,7 +107,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
move state0, copy0
move state1, copy1
move state2, copy2
- li.w state3, 0x6b206574
+ move state3, copy3
/* state[4,5,..,11] = key */
ld.w state4, key, 0
@@ -167,12 +166,6 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
addi.w i, i, -1
bnez i, .Lpermute
- /*
- * copy[3] = "expa", materialize it here because copy[3] shares the
- * same register with i which just became dead.
- */
- li.w copy3, 0x6b206574
-
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
OP_4REG add.w line0, copy
st.w state0, output, 0