Diffstat (limited to 'arch/loongarch')
109 files changed, 1031 insertions, 677 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 2b8bd27a852f..4b19f93379a1 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -30,6 +30,8 @@ config LOONGARCH select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UBSAN + select ARCH_HAS_VDSO_ARCH_DATA select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION @@ -67,10 +69,12 @@ config LOONGARCH select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_LTO_CLANG_THIN + select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_SUPPORTS_RT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_BPF_JIT @@ -106,6 +110,7 @@ config LOONGARCH select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_DATA_STORE select GENERIC_VDSO_TIME_NS select GPIOLIB select HAS_IOPORT @@ -119,6 +124,7 @@ config LOONGARCH select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_STACKLEAK select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD @@ -175,7 +181,7 @@ config LOONGARCH select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_TIF_NOHZ - select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP + select HAVE_VIRT_CPU_ACCOUNTING_GEN select IRQ_FORCED_THREADING select IRQ_LOONGARCH_CPU select LOCK_MM_AND_FIND_VMA @@ -183,6 +189,7 @@ config LOONGARCH select MODULES_USE_ELF_RELA if MODULES select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK + select NUMA_MEMBLKS if NUMA select OF select OF_EARLY_FLATTREE select PCI @@ -291,6 +298,9 @@ config AS_HAS_LBT_EXTENSION config AS_HAS_LVZ_EXTENSION def_bool $(as-instr,hvcl 0) +config CC_HAS_ANNOTATE_TABLEJUMP + def_bool $(cc-option,-mannotate-tablejump) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -382,8 +392,8 @@ config CMDLINE_BOOTLOADER config CMDLINE_EXTEND bool "Use built-in to extend bootloader kernel arguments" help - The command-line arguments provided during boot will be - appended to the built-in command line. This is useful in + The built-in command line will be appended to the command- + line arguments provided during boot. This is useful in cases where the provided arguments are insufficient and you don't want to or cannot modify them. @@ -449,6 +459,15 @@ config SCHED_SMT Improves scheduler's performance when there are multiple threads in one physical core. +config SCHED_MC + bool "Multi-core scheduler support" + depends on SMP + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. + config SMP bool "Multi-Processing support" help @@ -478,10 +497,10 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. config NR_CPUS - int "Maximum number of CPUs (2-256)" - range 2 256 + int "Maximum number of CPUs (2-2048)" + range 2 2048 + default "2048" depends on SMP - default "64" help This allows you to specify the maximum number of CPUs which this kernel will support. 
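Note on the CMDLINE_EXTEND help-text fix above: with the corrected wording, the built-in command line is appended after whatever the bootloader passes. A minimal illustration of the resulting order (the values below are hypothetical, not taken from the patch):

```sh
# CONFIG_CMDLINE (built-in):              "earlycon loglevel=7"
# Arguments passed by the bootloader:     "root=/dev/nvme0n1p2 rw"
# Resulting /proc/cmdline with CONFIG_CMDLINE_EXTEND=y:
#   "root=/dev/nvme0n1p2 rw earlycon loglevel=7"
```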
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 567bd122a9ee..b0703a4e02a2 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -101,7 +101,11 @@ KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma) KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub) ifdef CONFIG_OBJTOOL -KBUILD_CFLAGS += -fno-jump-tables +ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP +KBUILD_CFLAGS += -mannotate-tablejump +else +KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers +endif endif KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small @@ -177,11 +181,14 @@ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@ install: - $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/$(image-name-y)-$(KERNELRELEASE) - $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE) - $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE) + $(call cmd,install) define archhelp - echo ' install - install kernel into $(INSTALL_PATH)' + echo ' vmlinux.elf - Uncompressed ELF kernel image (arch/loongarch/boot/vmlinux.elf)' + echo ' vmlinux.efi - Uncompressed EFI kernel image (arch/loongarch/boot/vmlinux.efi)' + echo ' vmlinuz.efi - GZIP/ZSTD-compressed EFI kernel image (arch/loongarch/boot/vmlinuz.efi)' + echo ' Default when CONFIG_EFI_ZBOOT=y' + echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or' + echo ' (distribution) /sbin/$(INSTALLKERNEL) or install.sh to $$(INSTALL_PATH)' echo endef diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi index 3b38ff8853a7..760c60eebb89 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi @@ -169,6 +169,166 @@ interrupts = <3>; }; + pwm@1ff5c000 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c000 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c010 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c010 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c020 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c020 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c030 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c030 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c040 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c040 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c050 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c050 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; 
+ + pwm@1ff5c060 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c060 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c070 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c070 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c080 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c080 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c090 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c090 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0a0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0a0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0b0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0b0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0c0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0c0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0d0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0d0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0e0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0e0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1ff5c0f0 { + compatible = "loongson,ls2k0500-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1ff5c0f0 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + gmac0: ethernet@1f020000 { compatible = "snps,dwmac-3.70a"; reg = <0x0 0x1f020000 0x0 0x10000>; diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts index 23cf26cc3e5f..78ea995abf1c 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts @@ -5,6 +5,7 @@ /dts-v1/; +#include "dt-bindings/thermal/thermal.h" #include "loongson-2k1000.dtsi" / { @@ -38,6 +39,13 @@ linux,cma-default; }; }; + + fan0: pwm-fan { + compatible = "pwm-fan"; + cooling-levels = <255 153 85 25>; + pwms = <&pwm1 0 100000 0>; + #cooling-cells = <2>; + }; }; &gmac0 { @@ -90,10 +98,21 @@ #address-cells = <1>; #size-cells = <0>; - spidev@0 { - compatible = "rohm,dh2228fv"; - spi-max-frequency = <100000000>; - reg 
= <0>; +}; + +&pwm1 { + status = "okay"; + + pinctrl-0 = <&pwm1_pins_default>; + pinctrl-names = "default"; +}; + +&cpu_thermal { + cooling-maps { + map0 { + trip = <&cpu_alert>; + cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; }; }; diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi index 8dff2aa52417..1da3beb00f0e 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi @@ -68,7 +68,7 @@ }; thermal-zones { - cpu-thermal { + cpu_thermal: cpu-thermal { polling-delay-passive = <1000>; polling-delay = <5000>; thermal-sensors = <&tsensor 0>; @@ -322,6 +322,46 @@ status = "disabled"; }; + pwm@1fe22000 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22000 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@1fe22010 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22010 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1fe22020 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22020 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@1fe22030 { + compatible = "loongson,ls2k1000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x1fe22030 0x0 0x10>; + interrupt-parent = <&liointc0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + pmc: power-management@1fe27000 { compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon"; reg = <0x0 0x1fe27000 0x0 0x58>; diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi index b4ff55a33e90..9e0411f2754c 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi @@ -165,6 +165,66 @@ interrupt-parent = <&eiointc>; }; + pwm@100a0000 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0000 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0100 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0100 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0200 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0200 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0300 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0300 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0400 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0400 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk 
LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm@100a0500 { + compatible = "loongson,ls2k2000-pwm", "loongson,ls7a-pwm"; + reg = <0x0 0x100a0500 0x0 0x10>; + interrupt-parent = <&pic>; + interrupts = <39 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_MISC_CLK>; + #pwm-cells = <3>; + status = "disabled"; + }; + rtc0: rtc@100d0100 { compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc"; reg = <0x0 0x100d0100 0x0 0x100>; diff --git a/arch/loongarch/boot/install.sh b/arch/loongarch/boot/install.sh new file mode 100755 index 000000000000..daac197d3315 --- /dev/null +++ b/arch/loongarch/boot/install.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# +# "make install" script for the LoongArch Linux port +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) + +set -e + +case "${2##*/}" in +vmlinux.elf) + echo "Installing uncompressed vmlinux.elf kernel" + base=vmlinux + ;; +vmlinux.efi) + echo "Installing uncompressed vmlinux.efi kernel" + base=vmlinux + ;; +vmlinuz.efi) + echo "Installing gzip/zstd compressed vmlinuz.efi kernel" + base=vmlinuz + ;; +*) + echo "Warning: Unexpected kernel type" + exit 1 + ;; +esac + +if [ -f $4/$base-$1 ]; then + mv $4/$base-$1 $4/$base-$1.old +fi +cat $2 > $4/$base-$1 + +# Install system map file +if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.map-$1.old +fi +cp $3 $4/System.map-$1 + +# Install kernel config file +if [ -f $4/config-$1 ]; then + mv $4/config-$1 $4/config-$1.old +fi +cp .config $4/config-$1 diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 73c77500ac46..0d59af6007b7 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -24,9 +24,9 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_DMEM=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y @@ -109,8 +109,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y # CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set @@ -666,6 +665,10 @@ CONFIG_RTW88_8723DE=m CONFIG_RTW88_8723DU=m CONFIG_RTW88_8821CE=m CONFIG_RTW88_8821CU=m +CONFIG_RTW88_8821AU=m +CONFIG_RTW88_8812AU=m +CONFIG_RTW88_8814AE=m +CONFIG_RTW88_8814AU=m CONFIG_RTW89=m CONFIG_RTW89_8851BE=m CONFIG_RTW89_8852AE=m @@ -749,6 +752,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m @@ -762,6 +766,7 @@ CONFIG_DRM_LOONGSON=y CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y +CONFIG_FIRMWARE_EDID=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m # CONFIG_VGA_CONSOLE is not set @@ -844,6 +849,9 @@ CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m CONFIG_INFINIBAND=m +CONFIG_EDAC=y +# CONFIG_EDAC_LEGACY_SYSFS is not set +CONFIG_EDAC_LOONGSON=y CONFIG_RTC_CLASS=y 
CONFIG_RTC_DRV_EFI=y CONFIG_RTC_DRV_LOONGSON=y @@ -981,7 +989,6 @@ CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m CONFIG_PSTORE_COMPRESS=y -CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y @@ -1019,7 +1026,7 @@ CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SELFTESTS=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h index 313f66f7913a..7376840fa9f7 100644 --- a/arch/loongarch/include/asm/acpi.h +++ b/arch/loongarch/include/asm/acpi.h @@ -33,7 +33,7 @@ static inline bool acpi_has_cpu_in_madt(void) return true; } -#define MAX_CORE_PIC 256 +#define MAX_CORE_PIC 2048 extern struct list_head acpi_wakeup_device_list; extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC]; diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index fe198b473f84..e739dbc6329d 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -18,12 +18,12 @@ /* * This gives the physical RAM offset. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifndef PHYS_OFFSET #define PHYS_OFFSET _UL(0) #endif extern unsigned long vm_map_base; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #ifndef IO_BASE #define IO_BASE CSR_DMW0_BASE @@ -66,7 +66,7 @@ extern unsigned long vm_map_base; #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ATYPE_ #define _ATYPE32_ #define _ATYPE64_ @@ -85,7 +85,7 @@ extern unsigned long vm_map_base; /* * 32/64-bit LoongArch address spaces */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ACAST32_ #define _ACAST64_ #else diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h index ff3d10ac393f..7dc29bd9b2f0 100644 --- a/arch/loongarch/include/asm/alternative-asm.h +++ b/arch/loongarch/include/asm/alternative-asm.h @@ -2,7 +2,7 @@ #ifndef _ASM_ALTERNATIVE_ASM_H #define _ASM_ALTERNATIVE_ASM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/asm.h> @@ -77,6 +77,6 @@ .previous .endm -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h index cee7b29785ab..b5bae21fb3c8 100644 --- a/arch/loongarch/include/asm/alternative.h +++ b/arch/loongarch/include/asm/alternative.h @@ -2,7 +2,7 @@ #ifndef _ASM_ALTERNATIVE_H #define _ASM_ALTERNATIVE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/stddef.h> @@ -106,6 +106,6 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_ALTERNATIVE_H */ diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h index df05005f2b80..d60bdf2e6377 100644 --- a/arch/loongarch/include/asm/asm-extable.h +++ b/arch/loongarch/include/asm/asm-extable.h @@ -7,7 +7,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_BPF 3 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ .pushsection __ex_table, "a"; \ 
@@ -22,7 +22,7 @@ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) .endm -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #include <linux/bits.h> #include <linux/stringify.h> @@ -60,6 +60,6 @@ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h index 51f224bcfc65..704066b4f736 100644 --- a/arch/loongarch/include/asm/asm-prototypes.h +++ b/arch/loongarch/include/asm/asm-prototypes.h @@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b); __int128_t __ashrti3(__int128_t a, int b); __int128_t __lshrti3(__int128_t a, int b); #endif + +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs); + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg); diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h index f591b3245def..f018d26fc995 100644 --- a/arch/loongarch/include/asm/asm.h +++ b/arch/loongarch/include/asm/asm.h @@ -110,7 +110,7 @@ #define LONG_SRA srai.w #define LONG_SRAV sra.w -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define LONG .word #endif #define LONGSIZE 4 @@ -131,7 +131,7 @@ #define LONG_SRA srai.d #define LONG_SRAV sra.d -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define LONG .dword #endif #define LONGSIZE 8 @@ -158,7 +158,7 @@ #define PTR_SCALESHIFT 2 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define PTR .word #endif #define PTRSIZE 4 @@ -181,7 +181,7 @@ #define PTR_SCALESHIFT 3 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define PTR .dword #endif #define PTRSIZE 8 diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h index 1b6d09617199..aa622c754414 100644 --- a/arch/loongarch/include/asm/cache.h +++ b/arch/loongarch/include/asm/cache.h @@ -8,6 +8,8 @@ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define ARCH_DMA_MINALIGN (16) + #define __read_mostly __section(".data..read_mostly") #endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index 98cf4d7b4b0a..dfb982fe8701 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -46,7 +46,7 @@ #define PRID_PRODUCT_MASK 0x0fff -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) enum cpu_type_enum { CPU_UNKNOWN, @@ -55,7 +55,7 @@ enum cpu_type_enum { CPU_LAST }; -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ /* * ISA Level encodings diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h index 0fe2a098ded9..099132980dc9 100644 --- a/arch/loongarch/include/asm/entry-common.h +++ b/arch/loongarch/include/asm/entry-common.h @@ -2,12 +2,6 @@ #ifndef ARCH_LOONGARCH_ENTRY_COMMON_H #define ARCH_LOONGARCH_ENTRY_COMMON_H -#include <linux/sched.h> -#include <linux/processor.h> - -static inline bool on_thread_stack(void) -{ - return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); -} +#include <asm/stacktrace.h> /* For on_thread_stack() */ #endif diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index 3177674228f8..45514f314664 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -22,22 +22,29 @@ struct 
sigcontext; #define kernel_fpu_available() cpu_has_fpu -extern void kernel_fpu_begin(void); -extern void kernel_fpu_end(void); - -extern void _init_fpu(unsigned int); -extern void _save_fp(struct loongarch_fpu *); -extern void _restore_fp(struct loongarch_fpu *); - -extern void _save_lsx(struct loongarch_fpu *fpu); -extern void _restore_lsx(struct loongarch_fpu *fpu); -extern void _init_lsx_upper(void); -extern void _restore_lsx_upper(struct loongarch_fpu *fpu); - -extern void _save_lasx(struct loongarch_fpu *fpu); -extern void _restore_lasx(struct loongarch_fpu *fpu); -extern void _init_lasx_upper(void); -extern void _restore_lasx_upper(struct loongarch_fpu *fpu); + +void kernel_fpu_begin(void); +void kernel_fpu_end(void); + +asmlinkage void _init_fpu(unsigned int); +asmlinkage void _save_fp(struct loongarch_fpu *); +asmlinkage void _restore_fp(struct loongarch_fpu *); +asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); +asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); + +asmlinkage void _save_lsx(struct loongarch_fpu *fpu); +asmlinkage void _restore_lsx(struct loongarch_fpu *fpu); +asmlinkage void _init_lsx_upper(void); +asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu); +asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); +asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); + +asmlinkage void _save_lasx(struct loongarch_fpu *fpu); +asmlinkage void _restore_lasx(struct loongarch_fpu *fpu); +asmlinkage void _init_lasx_upper(void); +asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu); +asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); +asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); static inline void enable_lsx(void); static inline void disable_lsx(void); diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h index 6e0a99763a9a..f4caaf764f9e 100644 --- a/arch/loongarch/include/asm/ftrace.h +++ b/arch/loongarch/include/asm/ftrace.h @@ -14,7 +14,7 @@ #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifndef CONFIG_DYNAMIC_FTRACE @@ -84,7 +84,7 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h index 996038da806d..af95b941f48b 100644 --- a/arch/loongarch/include/asm/gpr-num.h +++ b/arch/loongarch/include/asm/gpr-num.h @@ -2,7 +2,7 @@ #ifndef __ASM_GPR_NUM_H #define __ASM_GPR_NUM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .equ .L__gpr_num_zero, 0 .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 @@ -25,7 +25,7 @@ .equ .L__gpr_num_$s\num, 23 + \num .endr -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #define __DEFINE_ASM_GPR_NUMS \ " .equ .L__gpr_num_zero, 0\n" \ @@ -47,6 +47,6 @@ " .equ .L__gpr_num_$s\\num, 23 + \\num\n" \ " .endr\n" \ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_GPR_NUM_H */ diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index e77a56eaf906..eaff72b38dc8 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -23,9 +23,9 @@ extern void __init early_iounmap(void __iomem *addr, 
unsigned long size); #ifdef CONFIG_ARCH_IOREMAP static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - switch (prot_val & _CACHE_MASK) { + switch (pgprot_val(prot) & _CACHE_MASK) { case _CACHE_CC: return (void __iomem *)(unsigned long)(CACHE_BASE + offset); case _CACHE_SUC: @@ -38,7 +38,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, } #define ioremap(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) + ioremap_prot((offset), (size), PAGE_KERNEL_SUC) #define iounmap(addr) ((void)(addr)) @@ -55,10 +55,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, */ #define ioremap_wc(offset, size) \ ioremap_prot((offset), (size), \ - pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)) + wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC) #define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((offset), (size), PAGE_KERNEL) #define mmiowb() wmb() diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index a0ca84da8541..12bd15578c33 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,7 +53,7 @@ void spurious_interrupt(void); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); -#define MAX_IO_PICS 2 +#define MAX_IO_PICS 8 #define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h index 319a8c616f1f..620163628a7f 100644 --- a/arch/loongarch/include/asm/irqflags.h +++ b/arch/loongarch/include/asm/irqflags.h @@ -5,7 +5,7 @@ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/compiler.h> #include <linux/stringify.h> @@ -14,40 +14,48 @@ static inline void arch_local_irq_enable(void) { u32 flags = CSR_CRMD_IE; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline void arch_local_irq_disable(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline unsigned long arch_local_irq_save(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); return flags; } static inline void arch_local_irq_restore(unsigned long flags) { + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } @@ -72,6 +80,6 @@ static inline int arch_irqs_disabled(void) return arch_irqs_disabled_flags(arch_local_save_flags()); } -#endif /* #ifndef __ASSEMBLY__ */ +#endif /* #ifndef __ASSEMBLER__ */ #endif /* 
_ASM_IRQFLAGS_H */ diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h index 8a924bd69d19..4000c7603d8e 100644 --- a/arch/loongarch/include/asm/jump_label.h +++ b/arch/loongarch/include/asm/jump_label.h @@ -7,7 +7,7 @@ #ifndef __ASM_JUMP_LABEL_H #define __ASM_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -50,5 +50,5 @@ l_yes: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h index 7f52bd31b9d4..62f139a9c87d 100644 --- a/arch/loongarch/include/asm/kasan.h +++ b/arch/loongarch/include/asm/kasan.h @@ -2,7 +2,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/linkage.h> #include <linux/mmzone.h> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 590982cd986e..a3c4cc46c892 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -12,6 +12,7 @@ #include <linux/kvm.h> #include <linux/kvm_types.h> #include <linux/mutex.h> +#include <linux/perf_event.h> #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/types.h> @@ -176,6 +177,9 @@ struct kvm_vcpu_arch { /* Pointers stored here for easy accessing from assembly code */ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + /* GPA (=HVA) of PGD for secondary mmu */ + unsigned long kvm_pgd; + /* Host registers preserved across guest mode execution */ unsigned long host_sp; unsigned long host_tp; @@ -289,13 +293,15 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; } +bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu); + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); /* MMU handling */ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); -int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); @@ -320,7 +326,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) /* Misc */ static inline void kvm_arch_hardware_unsetup(void) {} -static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 2c349f961bfb..f1efd7cfbc20 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -37,7 +37,7 @@ #define KVM_LOONGSON_IRQ_NUM_MASK 0xffff typedef union loongarch_instruction larch_inst; -typedef int (*exit_handle_fn)(struct kvm_vcpu *); +typedef int (*exit_handle_fn)(struct kvm_vcpu *, int); int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h index e671978bf552..38566574e562 100644 --- a/arch/loongarch/include/asm/lbt.h +++ 
b/arch/loongarch/include/asm/lbt.h @@ -12,9 +12,13 @@ #include <asm/loongarch.h> #include <asm/processor.h> -extern void _init_lbt(void); -extern void _save_lbt(struct loongarch_lbt *); -extern void _restore_lbt(struct loongarch_lbt *); +asmlinkage void _init_lbt(void); +asmlinkage void _save_lbt(struct loongarch_lbt *); +asmlinkage void _restore_lbt(struct loongarch_lbt *); +asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags); +asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags); +asmlinkage int _save_ftop_context(void __user *ftop); +asmlinkage int _restore_ftop_context(void __user *ftop); static inline int is_lbt_enabled(void) { diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 52651aa0e583..a0994d226eff 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -9,15 +9,15 @@ #include <linux/linkage.h> #include <linux/types.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <larchintrin.h> /* CPUCFG */ #define read_cpucfg(reg) __cpucfg(reg) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ /* LoongArch Registers */ #define REG_ZERO 0x0 @@ -53,7 +53,7 @@ #define REG_S7 0x1e #define REG_S8 0x1f -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* Bit fields for CPUCFG registers */ #define LOONGARCH_CPUCFG0 0x0 @@ -171,7 +171,7 @@ * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* CSR */ #define csr_read32(reg) __csrrd_w(reg) @@ -187,7 +187,7 @@ #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) #define iocsr_write64(val, reg) __iocsrwr_d(val, reg) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* CSR register number */ @@ -411,8 +411,8 @@ /* Config CSR registers */ #define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */ -#define CSR_CPUID_COREID_WIDTH 9 -#define CSR_CPUID_COREID _ULCAST_(0x1ff) +#define CSR_CPUID_COREID_WIDTH 11 +#define CSR_CPUID_COREID _ULCAST_(0x7ff) #define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */ #define CSR_CONF1_VSMAX_SHIFT 12 @@ -1195,7 +1195,7 @@ #define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00 #define IOCSR_EXTIOI_VECTOR_NUM 256 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ static __always_inline u64 drdtime(void) { @@ -1357,7 +1357,7 @@ __BUILD_CSR_OP(tlbidx) #define clear_csr_estat(val) \ csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* Generic EntryLo bit definitions */ #define ENTRYLO_V (_ULCAST_(1) << 0) diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h index b5f9de9f102e..bbf9f70bd25f 100644 --- a/arch/loongarch/include/asm/numa.h +++ b/arch/loongarch/include/asm/numa.h @@ -22,20 +22,6 @@ extern int numa_off; extern s16 __cpuid_to_node[CONFIG_NR_CPUS]; extern nodemask_t numa_nodes_parsed __initdata; -struct numa_memblk { - u64 start; - u64 end; - int nid; -}; - -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) -struct numa_meminfo { - int nr_blks; - struct numa_memblk blk[NR_NODE_MEMBLKS]; -}; - -extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); - extern void __init early_numa_add_cpu(int cpuid, s16 node); extern void numa_add_cpu(unsigned int cpu); extern void numa_remove_cpu(unsigned int cpu); diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h index caf1f71a1057..d5fa98d1d177 100644 --- a/arch/loongarch/include/asm/orc_types.h +++ 
b/arch/loongarch/include/asm/orc_types.h @@ -34,7 +34,7 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This struct is more or less a vastly simplified version of the DWARF Call * Frame Information standard. It contains only the necessary parts of DWARF @@ -53,6 +53,6 @@ struct orc_entry { unsigned int type:3; unsigned int signal:1; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ORC_TYPES_H */ diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index 7368f12b7cb1..a3aaf34fba16 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -15,7 +15,7 @@ #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/kernel.h> #include <linux/pfn.h> @@ -110,6 +110,6 @@ extern int __virt_addr_valid(volatile void *kaddr); #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PAGE_H */ diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 7211dff8c969..1c63a9d9a6d3 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -55,11 +55,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) return pte; } -#define __pte_free_tlb(tlb, pte, address) \ -do { \ - pagetable_dtor(page_ptdesc(pte)); \ - tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \ -} while (0) +#define __pte_free_tlb(tlb, pte, address) \ + tlb_remove_ptdesc((tlb), page_ptdesc(pte)) #ifndef __PAGETABLE_PMD_FOLDED @@ -72,7 +69,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) if (!ptdesc) return NULL; - if (!pagetable_pmd_ctor(ptdesc)) { + if (!pagetable_pmd_ctor(mm, ptdesc)) { pagetable_free(ptdesc); return NULL; } diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 45bfc65a0c9f..7bbfb04a54cc 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -92,7 +92,7 @@ #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) @@ -127,6 +127,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) return __pgprot(prot); } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index da346733a1da..f2aeff544cee 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -55,7 +55,7 @@ #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/mm_types.h> #include <linux/mmzone.h> @@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp) #define pmd_page_vaddr(pmd) pmd_val(pmd) -extern pmd_t mk_pmd(struct page *page, pgprot_t prot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); #define pte_page(x) pfn_to_page(pte_pfn(x)) @@ -302,7 +301,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) #define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) -static inline 
int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } @@ -426,12 +425,6 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) return false; } -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _PAGE_CHG_MASK) | @@ -625,6 +618,6 @@ static inline long pmd_protnone(pmd_t pmd) #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PGTABLE_H */ diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h index 1672262a5e2e..0b168cdaae9a 100644 --- a/arch/loongarch/include/asm/prefetch.h +++ b/arch/loongarch/include/asm/prefetch.h @@ -8,7 +8,7 @@ #define Pref_Load 0 #define Pref_Store 8 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro __pref hint addr #ifdef CONFIG_CPU_HAS_PREFETCH diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h index f3ddaed9ef7f..e5d21e836d99 100644 --- a/arch/loongarch/include/asm/ptrace.h +++ b/arch/loongarch/include/asm/ptrace.h @@ -33,9 +33,9 @@ struct pt_regs { unsigned long __last[]; } __aligned(8); -static inline int regs_irqs_disabled(struct pt_regs *regs) +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) { - return arch_irqs_disabled_flags(regs->csr_prmd); + return !(regs->csr_prmd & CSR_PRMD_PIE); } static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) @@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v /* Query offset/name of register from its name/offset */ extern int regs_query_register_offset(const char *name); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) /** * regs_get_register() - get register value from its offset diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index b87d1d5e5890..3a47f52959a8 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -25,6 +25,7 @@ extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_llc_shared_map[]; extern cpumask_t cpu_core_map[]; extern cpumask_t cpu_foreign_map[]; @@ -38,7 +39,7 @@ int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); #endif -static inline void plat_smp_setup(void) +static inline void __init plat_smp_setup(void) { loongson_smp_setup(); } diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 8d4af6aff8a8..4501efac1a87 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -21,11 +21,6 @@ #define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. 
*/ #endif -#ifdef CONFIG_MEMORY_HOTPLUG -int memory_add_physaddr_to_nid(u64 addr); -#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid -#endif - #define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS) #endif /* _LOONGARCH_SPARSEMEM_H */ diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 66736837085b..3eda298702b1 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -57,6 +57,12 @@ jirl zero, \temp1, 0xc .endm + .macro STACKLEAK_ERASE +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + bl stackleak_erase_on_task_stack +#endif + .endm + .macro BACKUP_T0T1 csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h index f23adb15f418..5c8be156567c 100644 --- a/arch/loongarch/include/asm/stacktrace.h +++ b/arch/loongarch/include/asm/stacktrace.h @@ -8,6 +8,7 @@ #include <asm/asm.h> #include <asm/ptrace.h> #include <asm/loongarch.h> +#include <asm/unwind_hints.h> #include <linux/stringify.h> enum stack_type { @@ -30,6 +31,11 @@ bool in_irq_stack(unsigned long stack, struct stack_info *info); bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info); int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info); +static __always_inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + #define STR_LONG_L __stringify(LONG_L) #define STR_LONG_S __stringify(LONG_S) #define STR_LONGSIZE __stringify(LONGSIZE) @@ -43,6 +49,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i static __always_inline void prepare_frametrace(struct pt_regs *regs) { __asm__ __volatile__( + UNWIND_HINT_SAVE /* Save $ra */ STORE_ONE_REG(1) /* Use $ra to save PC */ @@ -80,6 +87,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) STORE_ONE_REG(29) STORE_ONE_REG(30) STORE_ONE_REG(31) + UNWIND_HINT_RESTORE : "=m" (regs->csr_era) : "r" (regs->regs) : "memory"); diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h index e286dc58476e..81d2733f7b94 100644 --- a/arch/loongarch/include/asm/syscall.h +++ b/arch/loongarch/include/asm/syscall.h @@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task, return regs->regs[11]; } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + regs->regs[11] = nr; +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -61,6 +68,14 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(&args[1], ®s->regs[5], 5 * sizeof(long)); } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + regs->orig_a0 = args[0]; + memcpy(®s->regs[5], &args[1], 5 * sizeof(long)); +} + static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_LOONGARCH64; diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 4f5a9441754e..9dfa2ef00816 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -10,7 +10,7 @@ #ifdef __KERNEL__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> @@ -53,7 +53,7 @@ static inline struct thread_info *current_thread_info(void) register unsigned long 
current_stack_pointer __asm__("$sp"); -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* thread information allocation */ #define THREAD_SIZE SZ_16K diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h index 50273c9187d0..f06e7ff25bb7 100644 --- a/arch/loongarch/include/asm/topology.h +++ b/arch/loongarch/include/asm/topology.h @@ -19,17 +19,22 @@ extern int pcibus_to_node(struct pci_bus *); #define cpumask_of_pcibus(bus) (cpu_online_mask) -extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; - -void numa_set_distance(int from, int to, int distance); - -#define node_distance(from, to) (node_distances[(from)][(to)]) +int __node_distance(int from, int to); +#define node_distance(from, to) __node_distance(from, to) #else #define pcibus_to_node(bus) 0 #endif #ifdef CONFIG_SMP +/* + * Return cpus that shares the last level cache. + */ +static inline const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_llc_shared_map[cpu]; +} + #define topology_physical_package_id(cpu) (cpu_data[cpu].package) #define topology_core_id(cpu) (cpu_data[cpu].core) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h index baf15a0dcf8b..0edd731f3d6a 100644 --- a/arch/loongarch/include/asm/types.h +++ b/arch/loongarch/include/asm/types.h @@ -8,7 +8,7 @@ #include <asm-generic/int-ll64.h> #include <uapi/asm/types.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define _ULCAST_ #define _U64CAST_ #else diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h index a01086ad9dde..16c7f7e465a0 100644 --- a/arch/loongarch/include/asm/unwind_hints.h +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -5,7 +5,7 @@ #include <linux/objtool.h> #include <asm/orc_types.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro UNWIND_HINT_UNDEFINED UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED @@ -23,6 +23,14 @@ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL .endm -#endif /* __ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ + +#define UNWIND_HINT_SAVE \ + UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) + +#define UNWIND_HINT_RESTORE \ + UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) + +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h index 99a0d198927f..025fc3f0a102 100644 --- a/arch/loongarch/include/asm/uprobes.h +++ b/arch/loongarch/include/asm/uprobes.h @@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t; #define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP) struct arch_uprobe { - unsigned long resume_era; u32 insn[2]; u32 ixol[2]; bool simulate; diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h index d3ba35eb23e7..f72ec79e2dde 100644 --- a/arch/loongarch/include/asm/vdso.h +++ b/arch/loongarch/include/asm/vdso.h @@ -31,7 +31,6 @@ struct loongarch_vdso_info { unsigned long size; unsigned long offset_sigreturn; struct vm_special_mapping code_mapping; - struct vm_special_mapping data_mapping; }; extern struct loongarch_vdso_info vdso_info; diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h new file mode 100644 index 000000000000..395ec223bcbe --- /dev/null +++ b/arch/loongarch/include/asm/vdso/arch_data.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 
Loongson Technology Corporation Limited + */ + +#ifndef _VDSO_ARCH_DATA_H +#define _VDSO_ARCH_DATA_H + +#ifndef __ASSEMBLER__ + +#include <asm/asm.h> +#include <asm/vdso.h> + +struct vdso_pcpu_data { + u32 node; +} ____cacheline_aligned_in_smp; + +struct vdso_arch_data { + struct vdso_pcpu_data pdata[NR_CPUS]; +}; + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h index e80f3c4ac748..2ff05003c6e7 100644 --- a/arch/loongarch/include/asm/vdso/getrandom.h +++ b/arch/loongarch/include/asm/vdso/getrandom.h @@ -5,7 +5,7 @@ #ifndef __ASM_VDSO_GETRANDOM_H #define __ASM_VDSO_GETRANDOM_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/unistd.h> #include <asm/vdso/vdso.h> @@ -20,7 +20,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (buffer), "r" (len), "r" (flags) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -28,11 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns return ret; } -static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void) -{ - return &_loongarch_data.rng_data; -} - -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h index 7eb3f041af76..dcafabca9bb6 100644 --- a/arch/loongarch/include/asm/vdso/gettimeofday.h +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -7,7 +7,7 @@ #ifndef __ASM_VDSO_GETTIMEOFDAY_H #define __ASM_VDSO_GETTIMEOFDAY_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/unistd.h> #include <asm/vdso/vdso.h> @@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (tv), "r" (tz) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -72,7 +72,7 @@ static __always_inline int clock_getres_fallback( } static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { uint64_t count; @@ -89,18 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void) } #define __arch_vdso_hres_capable loongarch_vdso_hres_capable -static __always_inline const struct vdso_data *__arch_get_vdso_data(void) -{ - return _vdso_data; -} - -#ifdef CONFIG_TIME_NS -static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) -{ - return _timens_data; -} -#endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h index ef5770b343a0..1e255373b0b8 100644 --- a/arch/loongarch/include/asm/vdso/processor.h +++ b/arch/loongarch/include/asm/vdso/processor.h @@ -5,10 +5,10 @@ #ifndef __ASM_VDSO_PROCESSOR_H #define 
__ASM_VDSO_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define cpu_relax() barrier() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h index 1c183a9b2115..04bd2d452876 100644 --- a/arch/loongarch/include/asm/vdso/vdso.h +++ b/arch/loongarch/include/asm/vdso/vdso.h @@ -7,49 +7,15 @@ #ifndef _ASM_VDSO_VDSO_H #define _ASM_VDSO_VDSO_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/asm.h> #include <asm/page.h> #include <asm/vdso.h> +#include <vdso/datapage.h> -struct vdso_pcpu_data { - u32 node; -} ____cacheline_aligned_in_smp; +#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT) -struct loongarch_vdso_data { - struct vdso_pcpu_data pdata[NR_CPUS]; - struct vdso_rng_data rng_data; -}; - -/* - * The layout of vvar: - * - * high - * +---------------------+--------------------------+ - * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE | - * +---------------------+--------------------------+ - * | time-ns vdso data | PAGE_SIZE | - * +---------------------+--------------------------+ - * | generic vdso data | PAGE_SIZE | - * +---------------------+--------------------------+ - * low - */ -#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data)) -#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT) - -enum vvar_pages { - VVAR_GENERIC_PAGE_OFFSET, - VVAR_TIMENS_PAGE_OFFSET, - VVAR_LOONGARCH_PAGES_START, - VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1, - VVAR_NR_PAGES, -}; - -#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT) - -extern struct loongarch_vdso_data _loongarch_data __attribute__((visibility("hidden"))); - -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h index 8987e951d0a9..558eb9dfda52 100644 --- a/arch/loongarch/include/asm/vdso/vsyscall.h +++ b/arch/loongarch/include/asm/vdso/vsyscall.h @@ -2,30 +2,13 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <vdso/datapage.h> -extern struct vdso_data *vdso_data; -extern struct vdso_rng_data *vdso_rng_data; - -static __always_inline -struct vdso_data *__loongarch_get_k_vdso_data(void) -{ - return vdso_data; -} -#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data - -static __always_inline -struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void) -{ - return vdso_rng_data; -} -#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data - /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 4853e8b04c6f..6f5a4574a911 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -5,7 +5,7 @@ OBJECT_FILES_NON_STANDARD_head.o := y -extra-y := vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \ @@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o -CFLAGS_module.o += $(call cc-option,-Wno-override-init,) -CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,) -CFLAGS_traps.o += $(call 
cc-option,-Wno-override-init,) -CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,) +CFLAGS_module.o += $(call cc-disable-warning, override-init) +CFLAGS_syscall.o += $(call cc-disable-warning, override-init) +CFLAGS_traps.o += $(call cc-disable-warning, override-init) +CFLAGS_perf_event.o += $(call cc-disable-warning, override-init) ifdef CONFIG_FUNCTION_TRACER ifndef CONFIG_DYNAMIC_FTRACE diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 1120ac2824f6..1367ca759468 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/acpi.h> #include <linux/efi-bgrt.h> +#include <linux/export.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/memblock.h> @@ -244,22 +245,6 @@ fdt_earlycon: #ifdef CONFIG_ACPI_NUMA -static __init int setup_node(int pxm) -{ - return acpi_map_pxm_to_node(pxm); -} - -void __init numa_set_distance(int from, int to, int distance) -{ - if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - node_distances[from][to] = distance; -} - /* Callback for Proximity Domain -> CPUID mapping */ void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) @@ -280,7 +265,41 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) pxm |= (pa->proximity_domain_hi[1] << 16); pxm |= (pa->proximity_domain_hi[2] << 24); } - node = setup_node(pxm); + node = acpi_map_pxm_to_node(pxm); + if (node < 0) { + pr_err("SRAT: Too many proximity domains %x\n", pxm); + bad_srat(); + return; + } + + if (pa->apic_id >= CONFIG_NR_CPUS) { + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", + pxm, pa->apic_id, node); + return; + } + + early_numa_add_cpu(pa->apic_id, node); + + set_cpuid_to_node(pa->apic_id, node); + node_set(node, numa_nodes_parsed); + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node); +} + +void __init +acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) +{ + int pxm, node; + + if (srat_disabled()) + return; + if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) { + bad_srat(); + return; + } + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) + return; + pxm = pa->proximity_domain; + node = acpi_map_pxm_to_node(pxm); if (node < 0) { pr_err("SRAT: Too many proximity domains %x\n", pxm); bad_srat(); diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c index 4ad13847e962..0e0c766df1e3 100644 --- a/arch/loongarch/kernel/alternative.c +++ b/arch/loongarch/kernel/alternative.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/export.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/alternative.h> diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 8be1c38ad8eb..db1e4bb26b6a 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -296,6 +296,7 @@ static void __used output_kvm_defines(void) OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp); OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp); OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd); + OFFSET(KVM_ARCH_KVMPGD, kvm_vcpu_arch, kvm_pgd); OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit); OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry); OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry); @@ -315,6 
+316,6 @@ static void __used output_vdso_defines(void) { COMMENT("LoongArch vDSO offsets."); - DEFINE(__VVAR_PAGES, VVAR_NR_PAGES); + DEFINE(__VDSO_PAGES, VDSO_NR_PAGES); BLANK(); } diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S index 5f23b85d78ca..ba0bdbf86aa8 100644 --- a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -7,7 +7,7 @@ #include <linux/sizes.h> .macro __EFI_PE_HEADER - .long PE_MAGIC + .long IMAGE_NT_SIGNATURE .Lcoff_header: .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ .short .Lsection_count /* NumberOfSections */ @@ -20,7 +20,7 @@ IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */ .Loptional_header: - .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */ + .short IMAGE_NT_OPTIONAL_HDR64_MAGIC /* PE32+ format */ .byte 0x02 /* MajorLinkerVersion */ .byte 0x14 /* MinorLinkerVersion */ .long __inittext_end - .Lefi_header_end /* SizeOfCode */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index de21e72759ee..860a3bc030e0 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -144,6 +144,18 @@ void __init efi_init(void) if (efi_memmap_init_early(&data) < 0) panic("Unable to map EFI memory map.\n"); + /* + * Reserve the physical memory region occupied by the EFI + * memory map table (header + descriptors). This is crucial + * for kdump, as the kdump kernel relies on this original + * memmap passed by the bootloader. Without reservation, + * this region could be overwritten by the primary kernel. + * Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that + * critical boot services code/data regions like this are preserved. + */ + memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size); + set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags); + early_memunmap(tbl, sizeof(*tbl)); } diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c index 0fa81ced28dc..3d98c6aa00db 100644 --- a/arch/loongarch/kernel/elf.c +++ b/arch/loongarch/kernel/elf.c @@ -6,7 +6,6 @@ #include <linux/binfmts.h> #include <linux/elf.h> -#include <linux/export.h> #include <linux/sched.h> #include <asm/cpu-features.h> diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 48e7e34e355e..47e1db9a1ce4 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -73,28 +73,29 @@ SYM_CODE_START(handle_syscall) move a0, sp bl do_syscall + STACKLEAK_ERASE RESTORE_ALL_AND_RET SYM_CODE_END(handle_syscall) _ASM_NOKPROBE(handle_syscall) -SYM_CODE_START(ret_from_fork) +SYM_CODE_START(ret_from_fork_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + bl ret_from_fork + STACKLEAK_ERASE RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_fork) +SYM_CODE_END(ret_from_fork_asm) -SYM_CODE_START(ret_from_kernel_thread) +SYM_CODE_START(ret_from_kernel_thread_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, s1 - jirl ra, s0, 0 - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + move a2, s0 + move a3, s1 + bl ret_from_kernel_thread + STACKLEAK_ERASE RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_kernel_thread) +SYM_CODE_END(ret_from_kernel_thread_asm) diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 2f1f5b08638f..27144de5c5fe 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void) 
return -ENODEV; clk = of_clk_get(np, 0); + of_node_put(np); + if (IS_ERR(clk)) return -ENODEV; diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 6ab640101457..28caf416ae36 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_save_fp_context) +EXPORT_SYMBOL_GPL(_save_fp_context) /* * a0: fpregs @@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_restore_fp_context) +EXPORT_SYMBOL_GPL(_restore_fp_context) /* * a0: fpregs @@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_save_lsx_context) +EXPORT_SYMBOL_GPL(_save_lsx_context) /* * a0: fpregs @@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_restore_lsx_context) +EXPORT_SYMBOL_GPL(_restore_lsx_context) /* * a0: fpregs @@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_save_lasx_context) +EXPORT_SYMBOL_GPL(_save_lasx_context) /* * a0: fpregs @@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_restore_lasx_context) +EXPORT_SYMBOL_GPL(_restore_lasx_context) .L_fpu_fault: li.w a0, -EFAULT # failure diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c index 25c9a4cfd5fa..d5d81d74034c 100644 --- a/arch/loongarch/kernel/ftrace_dyn.c +++ b/arch/loongarch/kernel/ftrace_dyn.c @@ -85,14 +85,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod * dealing with an out-of-range condition, we can assume it * is due to a module being loaded far away from the kernel. * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * NOTE: __module_text_address() must be called within a RCU read + * section, but we can rely on ftrace_lock to ensure that 'mod' * retains its validity throughout the remainder of this code. 
*/ if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); + scoped_guard(rcu) + mod = __module_text_address(pc); } if (WARN_ON(!mod)) diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S index 4f0912141781..733a7665e434 100644 --- a/arch/loongarch/kernel/genex.S +++ b/arch/loongarch/kernel/genex.S @@ -16,6 +16,7 @@ #include <asm/stackframe.h> #include <asm/thread_info.h> + .section .cpuidle.text, "ax" .align 5 SYM_FUNC_START(__arch_cpu_idle) /* start of idle interrupt region */ @@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle) */ idle 0 /* end of idle interrupt region */ -1: jr ra +idle_exit: + jr ra SYM_FUNC_END(__arch_cpu_idle) + .previous SYM_CODE_START(handle_vint) UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL - la_abs t1, 1b + la_abs t1, idle_exit LONG_L t0, sp, PT_ERA /* 3 instructions idle interrupt region */ ori t0, t0, 0b1100 diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index 506a99a5bbc7..e3865e92a917 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -20,7 +20,7 @@ __HEAD _head: - .word MZ_MAGIC /* "MZ", MS-DOS header */ + .word IMAGE_DOS_SIGNATURE /* "MZ", MS-DOS header */ .org 0x8 .dword _kernel_entry /* Kernel entry point (physical address) */ .dword _kernel_asize /* Kernel image effective size */ diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c index ec5b28e570c9..141b49bd989c 100644 --- a/arch/loongarch/kernel/kfpu.c +++ b/arch/loongarch/kernel/kfpu.c @@ -4,6 +4,7 @@ */ #include <linux/cpu.h> +#include <linux/export.h> #include <linux/init.h> #include <asm/fpu.h> #include <asm/smp.h> @@ -18,11 +19,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN; static DEFINE_PER_CPU(bool, in_kernel_fpu); static DEFINE_PER_CPU(unsigned int, euen_current); +static inline void fpregs_lock(void) +{ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); + else + local_bh_disable(); +} + +static inline void fpregs_unlock(void) +{ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); + else + local_bh_enable(); +} + void kernel_fpu_begin(void) { unsigned int *euen_curr; - preempt_disable(); + if (!irqs_disabled()) + fpregs_lock(); WARN_ON(this_cpu_read(in_kernel_fpu)); @@ -73,7 +91,8 @@ void kernel_fpu_end(void) this_cpu_write(in_kernel_fpu, false); - preempt_enable(); + if (!irqs_disabled()) + fpregs_unlock(); } EXPORT_SYMBOL_GPL(kernel_fpu_end); diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c index 445c452d72a7..7be5b4c0c900 100644 --- a/arch/loongarch/kernel/kgdb.c +++ b/arch/loongarch/kernel/kgdb.c @@ -8,6 +8,7 @@ #include <linux/hw_breakpoint.h> #include <linux/kdebug.h> #include <linux/kgdb.h> +#include <linux/objtool.h> #include <linux/processor.h> #include <linux/ptrace.h> #include <linux/sched.h> @@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->csr_era = pc; } -void arch_kgdb_breakpoint(void) +noinline void arch_kgdb_breakpoint(void) { __asm__ __volatile__ ( \ ".globl kgdb_breakinst\n\t" \ - "nop\n" \ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */ } +STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint); /* * Calls linux_debug_hook before the kernel dies. 
If KGDB is enabled, diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S index 001f061d226a..71678912d24c 100644 --- a/arch/loongarch/kernel/lbt.S +++ b/arch/loongarch/kernel/lbt.S @@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_save_lbt_context) +EXPORT_SYMBOL_GPL(_save_lbt_context) /* * a0: scr @@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_restore_lbt_context) +EXPORT_SYMBOL_GPL(_restore_lbt_context) /* * a0: ftop @@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_save_ftop_context) +EXPORT_SYMBOL_GPL(_save_ftop_context) /* * a0: ftop @@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context) li.w a0, 0 # success jr ra SYM_FUNC_END(_restore_ftop_context) +EXPORT_SYMBOL_GPL(_restore_ftop_context) .L_lbt_fault: li.w a0, -EFAULT # failure diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 84fe7f854820..d6e73e8f9c0b 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -11,6 +11,7 @@ #include <linux/mmzone.h> #include <linux/export.h> #include <linux/nodemask.h> +#include <linux/numa_memblks.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/pfn.h> @@ -27,10 +28,6 @@ #include <asm/time.h> int numa_off; -unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; -EXPORT_SYMBOL(node_distances); - -static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); @@ -43,8 +40,6 @@ s16 __cpuid_to_node[CONFIG_NR_CPUS] = { }; EXPORT_SYMBOL(__cpuid_to_node); -nodemask_t numa_nodes_parsed __initdata; - #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -145,48 +140,6 @@ void numa_remove_cpu(unsigned int cpu) cpumask_clear_cpu(cpu, &cpus_on_node[nid]); } -static int __init numa_add_memblk_to(int nid, u64 start, u64 end, - struct numa_meminfo *mi) -{ - /* ignore zero length blks */ - if (start == end) - return 0; - - /* whine about and ignore invalid blks */ - if (start > end || nid < 0 || nid >= MAX_NUMNODES) { - pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - nid, start, end - 1); - return 0; - } - - if (mi->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: too many memblk ranges\n"); - return -EINVAL; - } - - mi->blk[mi->nr_blks].start = PFN_ALIGN(start); - mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1); - mi->blk[mi->nr_blks].nid = nid; - mi->nr_blks++; - return 0; -} - -/** - * numa_add_memblk - Add one numa_memblk to numa_meminfo - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * Add a new memblk to the default numa_meminfo. - * - * RETURNS: - * 0 on success, -errno on failure. 
- */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - return numa_add_memblk_to(nid, start, end, &numa_meminfo); -} - static void __init node_mem_init(unsigned int node) { unsigned long start_pfn, end_pfn; @@ -205,18 +158,6 @@ static void __init node_mem_init(unsigned int node) #ifdef CONFIG_ACPI_NUMA -static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type) -{ - static unsigned long num_physpages; - - num_physpages += (size >> PAGE_SHIFT); - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", - node, type, start, size); - pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", - start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages); - memblock_set_node(start, size, &memblock.memory, node); -} - /* * add_numamem_region * @@ -228,28 +169,21 @@ static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type */ static void __init add_numamem_region(u64 start, u64 end, u32 type) { - u32 i; - u64 ofs = start; + u32 node = pa_to_nid(start); + u64 size = end - start; + static unsigned long num_physpages; if (start >= end) { pr_debug("Invalid region: %016llx-%016llx\n", start, end); return; } - for (i = 0; i < numa_meminfo.nr_blks; i++) { - struct numa_memblk *mb = &numa_meminfo.blk[i]; - - if (ofs > mb->end) - continue; - - if (end > mb->end) { - add_node_intersection(mb->nid, ofs, mb->end - ofs, type); - ofs = mb->end; - } else { - add_node_intersection(mb->nid, ofs, end - ofs, type); - break; - } - } + num_physpages += (size >> PAGE_SHIFT); + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", + node, type, start, size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages); + memblock_set_node(start, size, &memblock.memory, node); } static void __init init_node_memblock(void) @@ -291,24 +225,6 @@ static void __init init_node_memblock(void) } } -static void __init numa_default_distance(void) -{ - int row, col; - - for (row = 0; row < MAX_NUMNODES; row++) - for (col = 0; col < MAX_NUMNODES; col++) { - if (col == row) - node_distances[row][col] = LOCAL_DISTANCE; - else - /* We assume that one node per package here! - * - * A SLIT should be used for multiple nodes - * per package to override default setting. - */ - node_distances[row][col] = REMOTE_DISTANCE; - } -} - /* * fake_numa_init() - For Non-ACPI systems * Return: 0 on success, -errno on failure. @@ -333,11 +249,11 @@ int __init init_numa_memory(void) for (i = 0; i < NR_CPUS; i++) set_cpuid_to_node(i, NUMA_NO_NODE); - numa_default_distance(); + numa_reset_distance(); nodes_clear(numa_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX)); /* Parse SRAT and SLIT if provided by firmware. */ ret = acpi_disabled ? 
fake_numa_init() : acpi_numa_init(); @@ -387,12 +303,6 @@ void __init paging_init(void) free_area_init(zones_size); } -void __init mem_init(void) -{ - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); -} - int pcibus_to_node(struct pci_bus *bus) { return dev_to_node(&bus->dev); diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index e5a39bbad078..b1b51f920b23 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/export.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq_work.h> diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index f86a4b838dd7..8ad098703488 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx, if (!loongarch_pmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - loongarch_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } static irqreturn_t pmu_handle_irq(int irq, void *dev) diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 6e58f65455c7..3582f591bab2 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -13,6 +13,7 @@ #include <linux/cpu.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/entry-common.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -34,6 +35,7 @@ #include <linux/nmi.h> #include <asm/asm.h> +#include <asm/asm-prototypes.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/elf.h> @@ -47,6 +49,7 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/reg.h> +#include <asm/switch_to.h> #include <asm/unwind.h> #include <asm/vdso.h> @@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); -asmlinkage void ret_from_fork(void); -asmlinkage void ret_from_kernel_thread(void); +asmlinkage void restore_and_ret(void); +asmlinkage void ret_from_fork_asm(void); +asmlinkage void ret_from_kernel_thread_asm(void); void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { @@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs) +{ + schedule_tail(prev); + syscall_exit_to_user_mode(regs); +} + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg) +{ + schedule_tail(prev); + fn(fn_arg); + syscall_exit_to_user_mode(regs); +} + /* * Copy architecture-specific thread state */ @@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.reg03 = childksp; p->thread.reg23 = (unsigned long)args->fn; p->thread.reg24 = (unsigned long)args->fn_arg; - p->thread.reg01 = (unsigned long)ret_from_kernel_thread; - p->thread.sched_ra = (unsigned long)ret_from_kernel_thread; + p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm; + p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm; memset(childregs, 0, sizeof(struct pt_regs)); childregs->csr_euen = p->thread.csr_euen; childregs->csr_crmd = p->thread.csr_crmd; @@ 
-182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->regs[3] = usp; p->thread.reg03 = (unsigned long) childregs; - p->thread.reg01 = (unsigned long) ret_from_fork; - p->thread.sched_ra = (unsigned long) ret_from_fork; + p->thread.reg01 = (unsigned long) ret_from_fork_asm; + p->thread.sched_ra = (unsigned long) ret_from_fork_asm; /* * New tasks lose permission to use the fpu. This accelerates context diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 90cb3ca96f08..b99fbb388fe0 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -259,18 +259,17 @@ static void __init arch_reserve_crashkernel(void) int ret; unsigned long long low_size = 0; unsigned long long crash_base, crash_size; - char *cmdline = boot_command_line; bool high = false; if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) return; - ret = parse_crashkernel(cmdline, memblock_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base, &low_size, &high); if (ret) return; - reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high); + reserve_crashkernel_generic(crash_size, crash_base, low_size, high); } static void __init fdt_setup(void) diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c index 7a555b600171..4740cb5b2388 100644 --- a/arch/loongarch/kernel/signal.c +++ b/arch/loongarch/kernel/signal.c @@ -51,27 +51,6 @@ #define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); }) #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); }) -/* Assembly functions to move context to/from the FPU */ -extern asmlinkage int -_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); -extern asmlinkage int -_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); -extern asmlinkage int -_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); -extern asmlinkage int -_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); -extern asmlinkage int -_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); -extern asmlinkage int -_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); - -#ifdef CONFIG_CPU_HAS_LBT -extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags); -extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags); -extern asmlinkage int _save_ftop_context(void __user *ftop); -extern asmlinkage int _restore_ftop_context(void __user *ftop); -#endif - struct rt_sigframe { struct siginfo rs_info; struct ucontext rs_uctx; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 4b24589c0b56..46036d98da75 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -46,6 +46,10 @@ EXPORT_SYMBOL(__cpu_logical_map); cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); +/* Representing the last level cache shared map of each logical CPU */ +cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_llc_shared_map); + /* Representing the core map of multi-core chips of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); @@ -63,6 +67,9 @@ EXPORT_SYMBOL(cpu_foreign_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +/* representing cpus for which llc shared maps can be computed 
*/ +static cpumask_t cpu_llc_shared_setup_map; + /* representing cpus for which core maps can be computed */ static cpumask_t cpu_core_setup_map; @@ -102,6 +109,34 @@ static inline void set_cpu_core_map(int cpu) } } +static inline void set_cpu_llc_shared_map(int cpu) +{ + int i; + + cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map); + + for_each_cpu(i, &cpu_llc_shared_setup_map) { + if (cpu_to_node(cpu) == cpu_to_node(i)) { + cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]); + cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]); + } + } +} + +static inline void clear_cpu_llc_shared_map(int cpu) +{ + int i; + + for_each_cpu(i, &cpu_llc_shared_setup_map) { + if (cpu_to_node(cpu) == cpu_to_node(i)) { + cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]); + cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]); + } + } + + cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map); +} + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -406,6 +441,7 @@ int loongson_cpu_disable(void) #endif set_cpu_online(cpu, false); clear_cpu_sibling_map(cpu); + clear_cpu_llc_shared_map(cpu); calculate_cpu_foreign_map(); local_irq_save(flags); irq_migrate_all_off_this_cpu(); @@ -572,6 +608,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) current_thread_info()->cpu = 0; loongson_prepare_cpus(max_cpus); set_cpu_sibling_map(0); + set_cpu_llc_shared_map(0); set_cpu_core_map(0); calculate_cpu_foreign_map(); #ifndef CONFIG_HOTPLUG_CPU @@ -613,6 +650,7 @@ asmlinkage void start_secondary(void) loongson_init_secondary(); set_cpu_sibling_map(cpu); + set_cpu_llc_shared_map(cpu); set_cpu_core_map(cpu); notify_cpu_starting(cpu); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index e2d3bfeb6366..367906b10f81 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -102,7 +102,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev return 0; } -static unsigned long __init get_loops_per_jiffy(void) +static unsigned long get_loops_per_jiffy(void) { unsigned long lpj = (unsigned long)const_clock_freq; @@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void) return lpj; } -static long init_offset __nosavedata; +static long init_offset; void save_counter(void) { diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 2ec3106c0da3..3d9be6ca7ec5 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/kexec.h> #include <linux/module.h> +#include <linux/export.h> #include <linux/extable.h> #include <linux/mm.h> #include <linux/sched/mm.h> @@ -553,9 +554,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs) die_if_kernel("Kernel ale access", regs); force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); #else + bool pie = regs_irqs_disabled(regs); unsigned int *pc; - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_enable(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); @@ -582,7 +584,7 @@ sigbus: die_if_kernel("Kernel ale access", regs); force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); out: - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_disable(); #endif irqentry_exit(regs, state); @@ -621,12 +623,13 @@ static void bug_handler(struct pt_regs *regs) asmlinkage void noinstr do_bce(struct pt_regs *regs) { bool user = user_mode(regs); + bool pie = regs_irqs_disabled(regs); unsigned long era = exception_era(regs); u64 badv = 0, lower = 0, upper = ULONG_MAX; 
union loongarch_instruction insn; irqentry_state_t state = irqentry_enter(regs); - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_enable(); current->thread.trap_nr = read_csr_excode(); @@ -692,7 +695,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs) force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper); out: - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_disable(); irqentry_exit(regs, state); @@ -710,11 +713,12 @@ bad_era: asmlinkage void noinstr do_bp(struct pt_regs *regs) { bool user = user_mode(regs); + bool pie = regs_irqs_disabled(regs); unsigned int opcode, bcode; unsigned long era = exception_era(regs); irqentry_state_t state = irqentry_enter(regs); - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_enable(); if (__get_inst(&opcode, (u32 *)era, user)) @@ -780,7 +784,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) } out: - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_disable(); irqentry_exit(regs, state); @@ -1015,6 +1019,7 @@ static void init_restore_lbt(void) asmlinkage void noinstr do_lbt(struct pt_regs *regs) { + bool pie = regs_irqs_disabled(regs); irqentry_state_t state = irqentry_enter(regs); /* @@ -1024,7 +1029,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs) * (including the user using 'MOVGR2GCSR' to turn on TM, which * will not trigger the BTE), we need to check PRMD first. */ - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_enable(); if (!cpu_has_lbt) { @@ -1038,7 +1043,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs) preempt_enable(); out: - if (regs->csr_prmd & CSR_PRMD_PIE) + if (!pie) local_irq_disable(); irqentry_exit(regs, state); diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c index 98379b7d4147..08d7951b2f60 100644 --- a/arch/loongarch/kernel/unwind_guess.c +++ b/arch/loongarch/kernel/unwind_guess.c @@ -3,6 +3,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ #include <asm/unwind.h> +#include <linux/export.h> unsigned long unwind_get_return_address(struct unwind_state *state) { diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c index b25722876331..0005be49b056 100644 --- a/arch/loongarch/kernel/unwind_orc.c +++ b/arch/loongarch/kernel/unwind_orc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <linux/objtool.h> +#include <linux/export.h> #include <linux/module.h> +#include <linux/objtool.h> #include <linux/sort.h> #include <asm/exception.h> #include <asm/orc_header.h> @@ -399,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state) return false; /* Don't let modules unload while we're reading their ORC data. 
*/ - preempt_disable(); + guard(rcu)(); if (is_entry_func(state->pc)) goto end; @@ -514,14 +515,12 @@ bool unwind_next_frame(struct unwind_state *state) if (!__kernel_text_address(state->pc)) goto err; - preempt_enable(); return true; err: state->error = true; end: - preempt_enable(); state->stack_info.type = STACK_TYPE_UNKNOWN; return false; } diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c index 929ae240280a..729e775bd40d 100644 --- a/arch/loongarch/kernel/unwind_prologue.c +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -3,6 +3,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ #include <linux/cpumask.h> +#include <linux/export.h> #include <linux/ftrace.h> #include <linux/kallsyms.h> diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c index 87abc7137b73..6022eb0f71db 100644 --- a/arch/loongarch/kernel/uprobes.c +++ b/arch/loongarch/kernel/uprobes.c @@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; instruction_pointer_set(regs, utask->xol_vaddr); - user_enable_single_step(current); return 0; } @@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current->thread.trap_nr = utask->autask.saved_trap_nr; - - if (auprobe->simulate) - instruction_pointer_set(regs, auprobe->resume_era); - else - instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); - - user_disable_single_step(current); + instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); return 0; } @@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) current->thread.trap_nr = utask->autask.saved_trap_nr; instruction_pointer_set(regs, utask->vaddr); - user_disable_single_step(current); } bool arch_uprobe_xol_was_trapped(struct task_struct *t) @@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) insn.word = auprobe->insn[0]; arch_simulate_insn(insn, regs); - auprobe->resume_era = regs->csr_era; return true; } diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index 05e5fbac102a..7b888d9085a0 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -14,7 +14,7 @@ #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/time_namespace.h> +#include <linux/vdso_datastore.h> #include <asm/page.h> #include <asm/vdso.h> @@ -25,18 +25,6 @@ extern char vdso_start[], vdso_end[]; -/* Kernel-provided data used by the VDSO. 
*/ -static union vdso_data_store generic_vdso_data __page_aligned_data; - -static union { - u8 page[LOONGARCH_VDSO_DATA_SIZE]; - struct loongarch_vdso_data vdata; -} loongarch_vdso_data __page_aligned_data; - -struct vdso_data *vdso_data = generic_vdso_data.data; -struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata; -struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data; - static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { current->mm->context.vdso = (void *)(new_vma->vm_start); @@ -44,53 +32,12 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc return 0; } -static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf) -{ - unsigned long pfn; - struct page *timens_page = find_timens_vvar_page(vma); - - switch (vmf->pgoff) { - case VVAR_GENERIC_PAGE_OFFSET: - if (!timens_page) - pfn = sym_to_pfn(vdso_data); - else - pfn = page_to_pfn(timens_page); - break; -#ifdef CONFIG_TIME_NS - case VVAR_TIMENS_PAGE_OFFSET: - /* - * If a task belongs to a time namespace then a namespace specific - * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real - * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset. - * See also the comment near timens_setup_vdso_data(). - */ - if (!timens_page) - return VM_FAULT_SIGBUS; - else - pfn = sym_to_pfn(vdso_data); - break; -#endif /* CONFIG_TIME_NS */ - case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END: - pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START; - break; - default: - return VM_FAULT_SIGBUS; - } - - return vmf_insert_pfn(vma, vmf->address, pfn); -} - struct loongarch_vdso_info vdso_info = { .vdso = vdso_start, .code_mapping = { .name = "[vdso]", .mremap = vdso_mremap, }, - .data_mapping = { - .name = "[vvar]", - .fault = vvar_fault, - }, .offset_sigreturn = vdso_offset_sigreturn, }; @@ -101,7 +48,7 @@ static int __init init_vdso(void) BUG_ON(!PAGE_ALIGNED(vdso_info.vdso)); for_each_possible_cpu(cpu) - vdso_pdata[cpu].node = cpu_to_node(cpu); + vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu); vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start); vdso_info.code_mapping.pages = @@ -115,37 +62,6 @@ static int __init init_vdso(void) } subsys_initcall(init_vdso); -#ifdef CONFIG_TIME_NS -struct vdso_data *arch_get_vdso_data(void *vvar_page) -{ - return (struct vdso_data *)(vvar_page); -} - -/* - * The vvar mapping contains data for a specific time namespace, so when a - * task changes namespace we must unmap its vvar data for the old namespace. - * Subsequent faults will map in data for the new namespace. - * - * For more details see timens_setup_vdso_data(). 
- */ -int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) -{ - struct mm_struct *mm = task->mm; - struct vm_area_struct *vma; - - VMA_ITERATOR(vmi, mm, 0); - - mmap_read_lock(mm); - for_each_vma(vmi, vma) { - if (vma_is_special_mapping(vma, &vdso_info.data_mapping)) - zap_vma_pages(vma); - } - mmap_read_unlock(mm); - - return 0; -} -#endif - static unsigned long vdso_base(void) { unsigned long base = STACK_TOP; @@ -181,9 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto out; } - vma = _install_special_mapping(mm, data_addr, VVAR_SIZE, - VM_READ | VM_MAYREAD | VM_PFNMAP, - &info->data_mapping); + vma = vdso_install_vvar_mapping(mm, data_addr); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; @@ -191,7 +105,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_addr = data_addr + VVAR_SIZE; vma = _install_special_mapping(mm, vdso_addr, info->size, - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | + VM_SEALED_SYSMAP, &info->code_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 97a811077ac3..40eea6da7c25 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -33,6 +33,7 @@ config KVM select KVM_MMIO select KVM_XFER_TO_GUEST_WORK select SCHED_INFO + select GUEST_PERF_EVENTS if PERF_EVENTS help Support hosting virtualized guest machines using hardware virtualization extensions. You will need diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 3a01292f71cc..cb41d9265662 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -3,8 +3,6 @@ # Makefile for LoongArch KVM support # -ccflags-y += -I $(src) - include $(srctree)/virt/kvm/Makefile.kvm obj-$(CONFIG_KVM) += kvm.o @@ -23,4 +21,4 @@ kvm-y += intc/eiointc.o kvm-y += intc/pch_pic.o kvm-y += irqfd.o -CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) +CFLAGS_exit.o += $(call cc-disable-warning, override-init) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ea321403644a..fa52251b3bf1 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -341,7 +341,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) * 2) Execute CACOP/IDLE instructions; * 3) Access to unimplemented CSRs/IOCSRs. 
*/ -static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode) { int ret = RESUME_GUEST; enum emulation_result er = EMULATE_DONE; @@ -661,7 +661,7 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) return ret; } -static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode) { int ret; larch_inst inst; @@ -675,7 +675,7 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) return RESUME_GUEST; } - ret = kvm_handle_mm_fault(vcpu, badv, write); + ret = kvm_handle_mm_fault(vcpu, badv, write, ecode); if (ret) { /* Treat as MMIO */ inst.word = vcpu->arch.badi; @@ -705,14 +705,14 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) return ret; } -static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, false); + return kvm_handle_rdwr_fault(vcpu, false, ecode); } -static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode) { - return kvm_handle_rdwr_fault(vcpu, true); + return kvm_handle_rdwr_fault(vcpu, true, ecode); } int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -726,11 +726,12 @@ int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) /** * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use fpu which hasn't been allowed * by the root context. */ -static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) { struct kvm_run *run = vcpu->run; @@ -783,11 +784,12 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LSX when it is disabled in the root * context. */ -static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lsx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -798,11 +800,12 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) /* * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. * @vcpu: Virtual CPU context. + * @ecode: Exception code. * * Handle when the guest attempts to use LASX when it is disabled in the root * context. 
*/ -static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lasx(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -810,7 +813,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) { if (kvm_own_lbt(vcpu)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); @@ -872,7 +875,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu) kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); } -static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode) { int ret; larch_inst inst; @@ -932,16 +935,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) /* * LoongArch KVM callback handling for unimplemented guest exiting */ -static int kvm_fault_ni(struct kvm_vcpu *vcpu) +static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode) { - unsigned int ecode, inst; - unsigned long estat, badv; + unsigned int inst; + unsigned long badv; /* Fetch the instruction */ inst = vcpu->arch.badi; badv = vcpu->arch.badv; - estat = vcpu->arch.host_estat; - ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); kvm_arch_vcpu_dump_regs(vcpu); @@ -966,5 +967,5 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) { - return kvm_fault_tables[fault](vcpu); + return kvm_fault_tables[fault](vcpu, fault); } diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c index f39929d7bf8a..a75f865d6fb9 100644 --- a/arch/loongarch/kvm/intc/eiointc.c +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -9,7 +9,8 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) { - int ipnum, cpu, irq_index, irq_mask, irq; + int ipnum, cpu, cpuid, irq_index, irq_mask, irq; + struct kvm_vcpu *vcpu; for (irq = 0; irq < EIOINTC_IRQS; irq++) { ipnum = s->ipmap.reg_u8[irq / 32]; @@ -20,7 +21,12 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) irq_index = irq / 32; irq_mask = BIT(irq & 0x1f); - cpu = s->coremap.reg_u8[irq]; + cpuid = s->coremap.reg_u8[irq]; + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) set_bit(irq, s->sw_coreisr[cpu][ipnum]); else @@ -66,20 +72,25 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) } static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s, - int irq, void *pvalue, u32 len, bool notify) + int irq, u64 val, u32 len, bool notify) { - int i, cpu; - u64 val = *(u64 *)pvalue; + int i, cpu, cpuid; + struct kvm_vcpu *vcpu; for (i = 0; i < len; i++) { - cpu = val & 0xff; + cpuid = val & 0xff; val = val >> 8; if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) { - cpu = ffs(cpu) - 1; - cpu = (cpu >= 4) ? 0 : cpu; + cpuid = ffs(cpuid) - 1; + cpuid = (cpuid >= 4) ? 
0 : cpuid; } + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); + if (!vcpu) + continue; + + cpu = vcpu->vcpu_id; if (s->sw_coremap[irq + i] == cpu) continue; @@ -305,6 +316,11 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, return -EINVAL; } + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + vcpu->kvm->stat.eiointc_read_exits++; spin_lock_irqsave(&eiointc->lock, flags); switch (len) { @@ -398,7 +414,7 @@ static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu, irq = offset - EIOINTC_COREMAP_START; index = irq; s->coremap.reg_u8[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); break; default: ret = -EINVAL; @@ -436,17 +452,16 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu, break; case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: index = (offset - EIOINTC_ENABLE_START) >> 1; - old_data = s->enable.reg_u32[index]; + old_data = s->enable.reg_u16[index]; s->enable.reg_u16[index] = data; /* * 1: enable irq. * update irq when isr is set. */ data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index]; - index = index << 1; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1); } /* * 0: disable irq. @@ -455,7 +470,7 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu, data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index]; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0); } break; case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: @@ -484,7 +499,7 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu, irq = offset - EIOINTC_COREMAP_START; index = irq >> 1; s->coremap.reg_u16[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); break; default: ret = -EINVAL; @@ -529,10 +544,9 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu, * update irq when isr is set. */ data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; - index = index << 2; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1); } /* * 0: disable irq. @@ -541,7 +555,7 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu, data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0); } break; case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: @@ -570,7 +584,7 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu, irq = offset - EIOINTC_COREMAP_START; index = irq >> 2; s->coremap.reg_u32[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); break; default: ret = -EINVAL; @@ -615,10 +629,9 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu, * update irq when isr is set. 
*/ data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; - index = index << 3; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index + i, mask, 1); + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1); } /* * 0: disable irq. @@ -627,7 +640,7 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu, data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; for (i = 0; i < sizeof(data); i++) { u8 mask = (data >> (i * 8)) & 0xff; - eiointc_enable_irq(vcpu, s, index, mask, 0); + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0); } break; case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: @@ -656,7 +669,7 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu, irq = offset - EIOINTC_COREMAP_START; index = irq >> 3; s->coremap.reg_u64[index] = data; - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); break; default: ret = -EINVAL; @@ -679,6 +692,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, return -EINVAL; } + if (addr & (len - 1)) { + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + vcpu->kvm->stat.eiointc_write_exits++; spin_lock_irqsave(&eiointc->lock, flags); switch (len) { @@ -787,7 +805,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, int ret = 0; unsigned long flags; unsigned long type = (unsigned long)attr->attr; - u32 i, start_irq; + u32 i, start_irq, val; void __user *data; struct loongarch_eiointc *s = dev->kvm->arch.eiointc; @@ -795,8 +813,14 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, spin_lock_irqsave(&s->lock, flags); switch (type) { case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: - if (copy_from_user(&s->num_cpu, data, 4)) + if (copy_from_user(&val, data, 4)) ret = -EFAULT; + else { + if (val >= EIOINTC_ROUTE_MAX_VCPUS) + ret = -EINVAL; + else + s->num_cpu = val; + } break; case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: if (copy_from_user(&s->features, data, 4)) @@ -809,7 +833,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, for (i = 0; i < (EIOINTC_IRQS / 4); i++) { start_irq = i * 4; eiointc_update_sw_coremap(s, start_irq, - (void *)&s->coremap.reg_u32[i], sizeof(u32), false); + s->coremap.reg_u32[i], sizeof(u32), false); } break; default: @@ -824,7 +848,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, bool is_write) { - int addr, cpuid, offset, ret = 0; + int addr, cpu, offset, ret = 0; unsigned long flags; void *p = NULL; void __user *data; @@ -832,7 +856,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, s = dev->kvm->arch.eiointc; addr = attr->attr; - cpuid = addr >> 16; + cpu = addr >> 16; addr &= 0xffff; data = (void __user *)attr->addr; switch (addr) { @@ -857,8 +881,11 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, p = &s->isr.reg_u32[offset]; break; case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: + if (cpu >= s->num_cpu) + return -EINVAL; + offset = (addr - EIOINTC_COREISR_START) / 4; - p = &s->coreisr.reg_u32[cpuid][offset]; + p = &s->coreisr.reg_u32[cpu][offset]; break; case EIOINTC_COREMAP_START ... 
EIOINTC_COREMAP_END: offset = (addr - EIOINTC_COREMAP_START) / 4; @@ -899,9 +926,15 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, data = (void __user *)attr->addr; switch (addr) { case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: + if (is_write) + return ret; + p = &s->num_cpu; break; case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE: + if (is_write) + return ret; + p = &s->features; break; case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE: diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index 93f4acd44523..fe734dc062ed 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (unlikely(ret)) { - kvm_err("%s: : read date from addr %llx failed\n", __func__, addr); + kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); return ret; } /* Construct the mask by scanning the bit 27-30 */ @@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (unlikely(ret)) - kvm_err("%s: : write date to addr %llx failed\n", __func__, addr); + kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); return ret; } diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index b6864d6e5ec8..80ea63d465b8 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void) /* * Enable virtualization features granting guest direct control of * certain features: - * GCI=2: Trap on init or unimplement cache instruction. + * GCI=2: Trap on init or unimplemented cache instruction. * TORU=0: Trap on Root Unimplement. * CACTRL=1: Root control cache. - * TOP=0: Trap on Previlege. + * TOP=0: Trap on Privilege. * TOE=0: Trap on Exception. * TIT=0: Trap on Timer. */ @@ -394,6 +394,7 @@ static int kvm_loongarch_env_init(void) } kvm_init_gcsr_flag(); + kvm_register_perf_callbacks(NULL); /* Register LoongArch IPI interrupt controller interface. */ ret = kvm_loongarch_register_ipi_device(); @@ -425,6 +426,8 @@ static void kvm_loongarch_env_exit(void) } kfree(kvm_loongarch_ops); } + + kvm_unregister_perf_callbacks(); } static int kvm_loongarch_init(void) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 4d203294767c..ed956c5cf2cc 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -912,7 +912,7 @@ out: return err; } -int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write, int ecode) { int ret; @@ -921,8 +921,17 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) return ret; /* Invalidate this entry in the TLB */ - vcpu->arch.flush_gpa = gpa; - kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + if (!cpu_has_ptw || (ecode == EXCCODE_TLBM)) { + /* + * With HW PTW, invalid TLB is not added when page fault. But + * for EXCCODE_TLBM exception, stale TLB may exist because of + * the last read access. + * + * With SW PTW, invalid TLB is added in TLB refill exception. 
+ */ + vcpu->arch.flush_gpa = gpa; + kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + } return 0; } diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 1be185e94807..f1768b7a6194 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -60,16 +60,8 @@ ld.d t0, a2, KVM_ARCH_GPC csrwr t0, LOONGARCH_CSR_ERA - /* Save host PGDL */ - csrrd t0, LOONGARCH_CSR_PGDL - st.d t0, a2, KVM_ARCH_HPGD - - /* Switch to kvm */ - ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH - - /* Load guest PGDL */ - li.w t0, KVM_GPGD - ldx.d t0, t1, t0 + /* Load PGD for KVM hypervisor */ + ld.d t0, a2, KVM_ARCH_KVMPGD csrwr t0, LOONGARCH_CSR_PGDL /* Mix GID and RID */ diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 9e1a9b4aa4c6..5af32ec62cb1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + kvm_lose_pmu(vcpu); /* make sure the vcpu mode has been written */ smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); local_irq_enable(); @@ -361,6 +362,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { + unsigned long val; + + preempt_disable(); + val = gcsr_read(LOONGARCH_CSR_CRMD); + preempt_enable(); + + return (val & CSR_PRMD_PPLV) == PLV_KERN; +} + +#ifdef CONFIG_GUEST_PERF_EVENTS +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.pc; +} + +/* + * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, + * arrived in guest context. For LoongArch64, if PMU is not passthrough to VM, + * any event that arrives while a vCPU is loaded is considered to be "in guest". + */ +bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); +} +#endif + +bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) +{ return false; } @@ -874,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, vcpu->arch.st.guest_addr = 0; memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); + + /* + * When vCPU reset, clear the ESTAT and GINTC registers + * Other CSR registers are cleared with function _kvm_setcsr(). + */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); break; default: ret = -EINVAL; @@ -1459,8 +1495,17 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.vpid = 0; vcpu->arch.flush_gpa = INVALID_GPA; - hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); - vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_PINNED_HARD); + + /* Get GPA (=HVA) of PGD for kvm hypervisor */ + vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); + + /* + * Get PGD for primary mmu, virtual address is used since there is + * memory access after loading from CSR_PGD in tlb exception fast path. 
+ */ + vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; vcpu->arch.handle_exit = kvm_handle_exit; vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c index 8af8113ecd9d..db22c2ec55e2 100644 --- a/arch/loongarch/lib/crc32-loongarch.c +++ b/arch/loongarch/lib/crc32-loongarch.c @@ -11,6 +11,7 @@ #include <asm/cpu-features.h> #include <linux/crc32.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/unaligned.h> @@ -26,7 +27,7 @@ do { \ #define CRC32(crc, value, size) _CRC32(crc, value, size, crc) #define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc) -static DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) { @@ -65,10 +66,10 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) } EXPORT_SYMBOL(crc32_le_arch); -u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len) +u32 crc32c_arch(u32 crc, const u8 *p, size_t len) { if (!static_branch_likely(&have_crc32)) - return crc32c_le_base(crc, p, len); + return crc32c_base(crc, p, len); while (len >= sizeof(u64)) { u64 value = get_unaligned_le64(p); @@ -100,7 +101,7 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len) return crc; } -EXPORT_SYMBOL(crc32c_le_arch); +EXPORT_SYMBOL(crc32c_arch); u32 crc32_be_arch(u32 crc, const u8 *p, size_t len) { @@ -114,7 +115,7 @@ static int __init crc32_loongarch_init(void) static_branch_enable(&have_crc32); return 0; } -arch_initcall(crc32_loongarch_init); +subsys_initcall(crc32_loongarch_init); static void __exit crc32_loongarch_exit(void) { diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c index df309ae4045d..bcc9d01d8c41 100644 --- a/arch/loongarch/lib/csum.c +++ b/arch/loongarch/lib/csum.c @@ -2,6 +2,7 @@ // Copyright (C) 2019-2020 Arm Ltd. #include <linux/compiler.h> +#include <linux/export.h> #include <linux/kasan-checks.h> #include <linux/kernel.h> diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index e4068906143b..02dad4624fe3 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd = pmd_offset(pud, addr); } } - return (pte_t *) pmd; + + return (!pmd || pmd_none(pmdp_get(pmd))) ? 
NULL : (pte_t *) pmd; } uint64_t pmd_to_entrylo(unsigned long pmd_val) diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index ca5aa5f46a9f..c3e4586a7975 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -65,9 +65,6 @@ void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; -#ifdef CONFIG_ZONE_DMA - max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; -#endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; #endif @@ -75,14 +72,6 @@ void __init paging_init(void) free_area_init(max_zone_pfns); } - -void __init mem_init(void) -{ - max_mapnr = max_low_pfn; - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - - memblock_free_all(); -} #endif /* !CONFIG_NUMA */ void __ref free_initmem(void) @@ -117,14 +106,6 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) page += vmem_altmap_offset(altmap); __remove_pages(start_pfn, nr_pages, altmap); } - -#ifdef CONFIG_NUMA -int memory_add_physaddr_to_nid(u64 start) -{ - return pa_to_nid(start); -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c index 70ca73019811..df949a3d0f34 100644 --- a/arch/loongarch/mm/ioremap.c +++ b/arch/loongarch/mm/ioremap.c @@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size) } -void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) +void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size) { return early_memremap(phys_addr, size); } -void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, +void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { return early_memremap(phys_addr, size); diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 22a94bb3e6e8..352d9b2e02ab 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -135,15 +135,6 @@ void kernel_pte_init(void *addr) } while (p != end); } -pmd_t mk_pmd(struct page *page, pgprot_t prot) -{ - pmd_t pmd; - - pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot); - - return pmd; -} - void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index ea357a3edc09..fa1500d4aa3e 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx) */ if (seen_tail_call(ctx) && seen_call(ctx)) move_reg(ctx, TCC_SAVED, REG_TCC); + else + emit_insn(ctx, nop); ctx->stack_size = stack_adjust; } @@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); - move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + + if (insn->src_reg != BPF_PSEUDO_CALL) + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break; /* tail call */ @@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext { const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; - move_imm(ctx, dst, imm64, is32); + if (bpf_pseudo_func(insn)) + move_addr(ctx, dst, imm64); + else + move_imm(ctx, dst, imm64, is32); return 1; } diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index 68586338ecf8..f9c569f53949 100644 --- a/arch/loongarch/net/bpf_jit.h +++ 
b/arch/loongarch/net/bpf_jit.h @@ -27,6 +27,11 @@ struct jit_data { struct jit_ctx ctx; }; +static inline void emit_nop(union loongarch_instruction *insn) +{ + insn->word = INSN_NOP; +} + #define emit_insn(ctx, func, ...) \ do { \ if (ctx->image != NULL) { \ diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 1da4dc46df43..50c9016641a4 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -194,6 +194,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { struct pci_bus *bus; struct pci_root_info *info; + struct pci_host_bridge *host; struct acpi_pci_root_ops *root_ops; int domain = root->segment; int busnum = root->secondary.start; @@ -237,8 +238,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) return NULL; } - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); + /* If we must preserve the resource configuration, claim now */ + host = pci_find_host_bridge(bus); + if (host->preserve_config) + pci_bus_claim_resources(bus); + + /* + * Assign whatever was left unassigned. If we didn't claim above, + * this will reassign everything. + */ + pci_assign_unassigned_root_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c index 2726639150bc..5bc9627a6cf9 100644 --- a/arch/loongarch/pci/pci.c +++ b/arch/loongarch/pci/pci.c @@ -3,7 +3,6 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include <linux/kernel.h> -#include <linux/export.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/types.h> diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c index 1e0590542f98..e7b7346592cb 100644 --- a/arch/loongarch/power/hibernate.c +++ b/arch/loongarch/power/hibernate.c @@ -2,6 +2,7 @@ #include <asm/fpu.h> #include <asm/loongson.h> #include <asm/sections.h> +#include <asm/time.h> #include <asm/tlbflush.h> #include <linux/suspend.h> @@ -14,6 +15,7 @@ struct pt_regs saved_regs; void save_processor_state(void) { + save_counter(); saved_crmd = csr_read32(LOONGARCH_CSR_CRMD); saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); saved_euen = csr_read32(LOONGARCH_CSR_EUEN); @@ -26,6 +28,7 @@ void save_processor_state(void) void restore_processor_state(void) { + sync_counter(); csr_write32(saved_crmd, LOONGARCH_CSR_CRMD); csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); csr_write32(saved_euen, LOONGARCH_CSR_EUEN); diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index fdde1bcd4e26..ccd2c5e135c6 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -2,7 +2,7 @@ # Objects to go into the VDSO. # Include the generic Makefile to check the built vdso. -include $(srctree)/lib/vdso/Makefile +include $(srctree)/lib/vdso/Makefile.include obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \ vgetrandom-chacha.o sigreturn.o @@ -36,8 +36,7 @@ endif # VDSO linker flags. ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ - $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ - --hash-style=sysv --build-id -T + $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T # # Shared build commands. 
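
The bpf_jit.c hunk above teaches the LoongArch JIT to materialize a BPF_PSEUDO_FUNC ld_imm64 with move_addr() and to skip the A0-to-R0 copy for BPF-to-BPF calls. The easiest way to exercise that instruction form from the BPF side is a program whose callback is passed by address, for example through bpf_loop(). The following is an illustrative sketch only, not part of this patch; the section name, program and callback names are arbitrary, and it assumes libbpf's bpf_helpers.h and a kernel new enough to provide bpf_loop():

/* Illustrative only; not part of the patch above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* bpf_loop() receives this callback's address through a BPF_PSEUDO_FUNC
 * ld_imm64, the form the JIT change above now emits with move_addr(). */
static long count_cb(__u64 index, void *ctx)
{
	(*(__u64 *)ctx)++;	/* count iterations */
	return 0;		/* 0 = keep iterating */
}

SEC("tracepoint/syscalls/sys_enter_getpid")	/* hypothetical attach point */
int loop_prog(void *ctx)
{
	__u64 n = 0;

	bpf_loop(16, count_cb, &n, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
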
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S index 160cfaef2de4..8ff986499947 100644 --- a/arch/loongarch/vdso/vdso.lds.S +++ b/arch/loongarch/vdso/vdso.lds.S @@ -5,6 +5,7 @@ */ #include <asm/page.h> #include <generated/asm-offsets.h> +#include <vdso/datapage.h> OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch") @@ -12,11 +13,8 @@ OUTPUT_ARCH(loongarch) SECTIONS { - PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); -#ifdef CONFIG_TIME_NS - PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); -#endif - PROVIDE(_loongarch_data = _vdso_data + 2 * PAGE_SIZE); + VDSO_VVAR_SYMS + . = SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c index 0db51258b2a7..5301cd9d0f83 100644 --- a/arch/loongarch/vdso/vgetcpu.c +++ b/arch/loongarch/vdso/vgetcpu.c @@ -19,27 +19,19 @@ static __always_inline int read_cpu_id(void) return cpu_id; } -static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void) -{ - return _loongarch_data.pdata; -} - extern int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused); int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused) { int cpu_id; - const struct vdso_pcpu_data *data; cpu_id = read_cpu_id(); if (cpu) *cpu = cpu_id; - if (node) { - data = get_pcpu_data(); - *node = data[cpu_id].node; - } + if (node) + *node = vdso_u_arch_data.pdata[cpu_id].node; return 0; } diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S index c2733e6c3a8d..c4dd2bab8825 100644 --- a/arch/loongarch/vdso/vgetrandom-chacha.S +++ b/arch/loongarch/vdso/vgetrandom-chacha.S @@ -58,9 +58,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) #define copy0 t5 #define copy1 t6 #define copy2 t7 - -/* Reuse i as copy3 */ -#define copy3 i +#define copy3 t8 /* Packs to be used with OP_4REG */ #define line0 state0, state1, state2, state3 @@ -99,6 +97,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) li.w copy0, 0x61707865 li.w copy1, 0x3320646e li.w copy2, 0x79622d32 + li.w copy3, 0x6b206574 ld.w cnt_lo, counter, 0 ld.w cnt_hi, counter, 4 @@ -108,7 +107,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) move state0, copy0 move state1, copy1 move state2, copy2 - li.w state3, 0x6b206574 + move state3, copy3 /* state[4,5,..,11] = key */ ld.w state4, key, 0 @@ -167,12 +166,6 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack) addi.w i, i, -1 bnez i, .Lpermute - /* - * copy[3] = "expa", materialize it here because copy[3] shares the - * same register with i which just became dead. - */ - li.w copy3, 0x6b206574 - /* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */ OP_4REG add.w line0, copy st.w state0, output, 0 |
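
The vgetcpu.c rework above only changes where the vDSO finds the per-CPU node table (the generic vdso_u_arch_data page); the userspace contract is unchanged. For reference, a minimal caller that can end up in __vdso_getcpu(). This is an illustrative sketch, assuming glibc 2.29 or later for the getcpu() wrapper; whether the call is served by the vDSO or falls back to the getcpu syscall depends on the libc and architecture:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	unsigned int cpu, node;

	/* On architectures that export __vdso_getcpu, the libc can serve
	 * this from the vDSO without entering the kernel. */
	if (getcpu(&cpu, &node))
		return 1;
	printf("cpu %u node %u\n", cpu, node);
	return 0;
}
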