Diffstat (limited to 'arch/loongarch')
42 files changed, 1261 insertions, 401 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index f0abc38c40ac..5b1116733d88 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -9,6 +9,7 @@ config LOONGARCH select ACPI_PPTT if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_BINFMT_ELF_STATE + select ARCH_NEEDS_DEFER_KASAN select ARCH_DISABLE_KASAN_INLINE select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE @@ -69,7 +70,10 @@ config LOONGARCH select ARCH_SUPPORTS_LTO_CLANG_THIN select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_PER_VMA_LOCK select ARCH_SUPPORTS_RT + select ARCH_SUPPORTS_SCHED_SMT if SMP + select ARCH_SUPPORTS_SCHED_MC if SMP select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_MEMTEST @@ -108,8 +112,6 @@ config LOONGARCH select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL - select GENERIC_VDSO_DATA_STORE - select GENERIC_VDSO_TIME_NS select GPIOLIB select HAS_IOPORT select HAVE_ARCH_AUDITSYSCALL @@ -140,6 +142,7 @@ config LOONGARCH select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN select HAVE_EXIT_THREAD + select HAVE_GENERIC_TIF_BITS select HAVE_GUP_FAST select HAVE_FTRACE_GRAPH_FUNC select HAVE_FUNCTION_ARG_ACCESS_API @@ -298,6 +301,10 @@ config AS_HAS_LVZ_EXTENSION config CC_HAS_ANNOTATE_TABLEJUMP def_bool $(cc-option,-mannotate-tablejump) +config RUSTC_HAS_ANNOTATE_TABLEJUMP + depends on RUST + def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -448,23 +455,6 @@ config EFI_STUB This kernel feature allows the kernel to be loaded directly by EFI firmware without the use of a bootloader. -config SCHED_SMT - bool "SMT scheduler support" - depends on SMP - default y - help - Improves scheduler's performance when there are multiple - threads in one physical core. - -config SCHED_MC - bool "Multi-core scheduler support" - depends on SMP - default y - help - Multi-core scheduler support improves the CPU scheduler's decision - making when dealing with multi-core CPU chips at a cost of slightly - increased overhead in some places. - config SMP bool "Multi-Processing support" help @@ -563,10 +553,14 @@ config ARCH_STRICT_ALIGN -mstrict-align build parameter to prevent unaligned accesses. CPUs with h/w unaligned access support: - Loongson-2K2000/2K3000/3A5000/3C5000/3D5000. + Loongson-2K2000/2K3000 and all of the Loongson-3 series processors + based on LoongArch. CPUs without h/w unaligned access support: - Loongson-2K500/2K1000. + Loongson-2K0300/2K0500/2K1000. + + To check whether your hardware supports unaligned memory access, + read bit 20 (UAL) of the CPUCFG1 register. This option is enabled by default to make the kernel be able to run on all LoongArch systems.
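The CPUCFG1.UAL check that this help text describes is a one-liner in kernel code. A minimal sketch, assuming the read_cpucfg() helper and the LOONGARCH_CPUCFG1/CPUCFG1_UAL (bit 20) definitions from <asm/loongarch.h>; the function name is illustrative, not an existing kernel API, and userspace can instead test HWCAP_LOONGARCH_UAL in AT_HWCAP:

#include <linux/types.h>
#include <asm/loongarch.h>

/* Illustrative helper: true when CPUCFG1.UAL reports h/w unaligned access support. */
static bool cpu_has_hw_unaligned_access(void)
{
	return read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_UAL;
}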
But you can disable it manually if you want @@ -625,6 +619,16 @@ config CPU_HAS_PREFETCH config ARCH_SUPPORTS_KEXEC def_bool y +config ARCH_SUPPORTS_KEXEC_FILE + def_bool 64BIT + +config ARCH_SELECTS_KEXEC_FILE + def_bool 64BIT + depends on KEXEC_FILE + select KEXEC_ELF + select RELOCATABLE + select HAVE_IMA_KEXEC if IMA + config ARCH_SUPPORTS_CRASH_DUMP def_bool y diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index b0703a4e02a2..dc5bd3f1b8d2 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -106,6 +106,17 @@ KBUILD_CFLAGS += -mannotate-tablejump else KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers endif +ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP +KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump +else +KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers +endif +ifdef CONFIG_LTO_CLANG +# The annotate-tablejump option cannot be passed to the LLVM backend when LTO is +# enabled. To make the linker aware of it under LTO, '--loongarch-annotate-tablejump' +# also needs to be passed to ld.lld via '-mllvm'. +KBUILD_LDFLAGS += $(call ld-option,-mllvm --loongarch-annotate-tablejump) +endif endif KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small @@ -118,7 +129,7 @@ KBUILD_RUSTFLAGS_KERNEL += -Crelocation-model=pie LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs) endif -cflags-y += $(call cc-option, -mno-check-zero-division) +cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference) ifndef CONFIG_KASAN cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 34eaee0384c9..3e838c229cd5 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -45,6 +45,7 @@ CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y CONFIG_PERF_EVENTS=y CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y CONFIG_CRASH_DUMP=y CONFIG_LOONGARCH=y CONFIG_64BIT=y @@ -55,7 +56,7 @@ CONFIG_DMI=y CONFIG_EFI=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y -CONFIG_NR_CPUS=256 +CONFIG_NR_CPUS=2048 CONFIG_NUMA=y CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y @@ -106,7 +107,6 @@ CONFIG_CMDLINE_PARTITION=y CONFIG_IOSCHED_BFQ=y CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m -CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y CONFIG_ZSMALLOC=y @@ -155,7 +155,16 @@ CONFIG_INET_ESPINTCP=y CONFIG_INET_IPCOMP=m CONFIG_INET_UDP_DIAG=y CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_CONG_BIC=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m @@ -332,15 +341,33 @@ CONFIG_LLC2=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_DEFAULT=y CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=m CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -408,6 +435,7 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_COMPRESS=y CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_SYSFB_SIMPLEFB=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_BOOTLOADER_CONTROL=m CONFIG_EFI_CAPSULE_LOADER=m @@ -421,6 +449,11 @@ CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +CONFIG_MTD_RAW_NAND=m +CONFIG_MTD_NAND_PLATFORM=m +CONFIG_MTD_NAND_LOONGSON=m +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y +CONFIG_MTD_NAND_ECC_SW_BCH=y CONFIG_MTD_UBI=m CONFIG_MTD_UBI_BLOCK=y CONFIG_PARPORT=y @@ -576,6 +609,11 @@ CONFIG_E1000=y CONFIG_E1000E=y CONFIG_IGB=y CONFIG_IXGBE=y +CONFIG_I40E=y +CONFIG_ICE=y +CONFIG_FM10K=y +CONFIG_IGC=y +CONFIG_IDPF=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -680,6 +718,9 @@ CONFIG_USB4_NET=m CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +CONFIG_KEYBOARD_MATRIX=m CONFIG_KEYBOARD_XTKBD=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_PS2_SENTELIC=y @@ -704,8 +745,11 @@ CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DEVICE_INTERFACE=m CONFIG_IPMI_SI=m +CONFIG_IPMI_LS2K=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TPM=m +CONFIG_TCG_LOONGSON=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_PIIX4=y CONFIG_I2C_DESIGNWARE_CORE=y @@ -721,6 +765,10 @@ CONFIG_PINCTRL_LOONGSON2=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_LOONGSON=y CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCA9570=m +CONFIG_GPIO_PCF857X=m CONFIG_POWER_RESET=y CONFIG_POWER_RESET_RESTART=y CONFIG_POWER_RESET_SYSCON=y @@ -731,6 +779,7 @@ CONFIG_SENSORS_LM93=m CONFIG_SENSORS_W83795=m CONFIG_SENSORS_W83627HF=m CONFIG_LOONGSON2_THERMAL=m +CONFIG_MFD_LOONGSON_SE=m CONFIG_RC_CORE=m CONFIG_LIRC=y CONFIG_RC_DECODERS=y @@ -762,6 +811,7 @@ CONFIG_DRM_AST=y CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y +CONFIG_DRM_SIMPLEDRM=y CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y @@ -802,6 +852,7 @@ CONFIG_SND_HDA_CODEC_HDMI_ATI=y CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_AUDIO_MIDI_V2=y CONFIG_SND_SOC=m CONFIG_SND_SOC_LOONGSON_CARD=m CONFIG_SND_SOC_ES7134=m @@ -862,6 +913,8 @@ CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m +CONFIG_MMC=y +CONFIG_MMC_LOONGSON2=m CONFIG_INFINIBAND=m CONFIG_EDAC=y # CONFIG_EDAC_LEGACY_SYSFS is not set @@ -923,19 +976,22 @@ CONFIG_NTB_SWITCHTEC=m CONFIG_NTB_PERF=m CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_PWM_LOONGSON=y CONFIG_GENERIC_PHY=y CONFIG_USB4=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_GFS2_FS=m @@ -1027,9 +1083,12 @@ CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m # CONFIG_CIFS_DEBUG is not set CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y 
CONFIG_NLS_UTF8=y CONFIG_DLM=m CONFIG_KEY_DH_OPERATIONS=y @@ -1050,9 +1109,11 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_SM3_GENERIC=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=m @@ -1064,6 +1125,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_CRYPTO_DEV_LOONGSON_RNG=m CONFIG_DMA_CMA=y CONFIG_DMA_NUMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h index 52f298f7293b..483c955f2ae5 100644 --- a/arch/loongarch/include/asm/acenv.h +++ b/arch/loongarch/include/asm/acenv.h @@ -10,9 +10,8 @@ #ifndef _ASM_LOONGARCH_ACENV_H #define _ASM_LOONGARCH_ACENV_H -/* - * This header is required by ACPI core, but we have nothing to fill in - * right now. Will be updated later when needed. - */ +#ifdef CONFIG_ARCH_STRICT_ALIGN +#define ACPI_MISALIGNMENT_NOT_SUPPORTED +#endif /* CONFIG_ARCH_STRICT_ALIGN */ #endif /* _ASM_LOONGARCH_ACENV_H */ diff --git a/arch/loongarch/include/asm/image.h b/arch/loongarch/include/asm/image.h new file mode 100644 index 000000000000..cab981cdb72a --- /dev/null +++ b/arch/loongarch/include/asm/image.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * LoongArch binary image header for EFI (PE/COFF) format. + * + * Author: Youling Tang <tangyouling@kylinos.cn> + * Copyright (C) 2025 KylinSoft Corporation. + */ + +#ifndef __ASM_IMAGE_H +#define __ASM_IMAGE_H + +#ifndef __ASSEMBLER__ + +/** + * struct loongarch_image_header + * + * @dos_sig: Optional PE format 'MZ' signature. + * @padding_1: Reserved. + * @kernel_entry: Kernel image entry pointer. + * @kernel_asize: An estimated size of the kernel memory image, in LSB byte order. + * @text_offset: The image load offset in LSB byte order. + * @padding_2: Reserved. + * @pe_header: Optional offset to a PE format header. + **/ + +struct loongarch_image_header { + uint8_t dos_sig[2]; + uint16_t padding_1[3]; + uint64_t kernel_entry; + uint64_t kernel_asize; + uint64_t text_offset; + uint32_t padding_2[7]; + uint32_t pe_header; +}; + +/* + * loongarch_header_check_dos_sig - Helper to check the header + * + * Returns true (non-zero) if 'MZ' signature is found.
+ */ + +static inline int loongarch_header_check_dos_sig(const struct loongarch_image_header *h) +{ + if (!h) + return 0; + + return (h->dos_sig[0] == 'M' && h->dos_sig[1] == 'Z'); +} + +#endif /* __ASSEMBLER__ */ + +#endif /* __ASM_IMAGE_H */ diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 277d2140676b..55e64a12a124 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -77,6 +77,10 @@ enum reg2_op { iocsrwrh_op = 0x19205, iocsrwrw_op = 0x19206, iocsrwrd_op = 0x19207, + llacqw_op = 0xe15e0, + screlw_op = 0xe15e1, + llacqd_op = 0xe15e2, + screld_op = 0xe15e3, }; enum reg2i5_op { @@ -189,6 +193,7 @@ enum reg3_op { fldxd_op = 0x7068, fstxs_op = 0x7070, fstxd_op = 0x7078, + scq_op = 0x70ae, amswapw_op = 0x70c0, amswapd_op = 0x70c1, amaddw_op = 0x70c2, diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h index 62f139a9c87d..0e50e5b5e056 100644 --- a/arch/loongarch/include/asm/kasan.h +++ b/arch/loongarch/include/asm/kasan.h @@ -66,7 +66,6 @@ #define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET) #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET) -extern bool kasan_early_stage; extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; #define kasan_mem_to_shadow kasan_mem_to_shadow @@ -75,12 +74,6 @@ void *kasan_mem_to_shadow(const void *addr); #define kasan_shadow_to_mem kasan_shadow_to_mem const void *kasan_shadow_to_mem(const void *shadow_addr); -#define kasan_arch_is_ready kasan_arch_is_ready -static __always_inline bool kasan_arch_is_ready(void) -{ - return !kasan_early_stage; -} - #define addr_has_metadata addr_has_metadata static __always_inline bool addr_has_metadata(const void *addr) { diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h index cf95cd3eb2de..209fa43222e1 100644 --- a/arch/loongarch/include/asm/kexec.h +++ b/arch/loongarch/include/asm/kexec.h @@ -41,6 +41,18 @@ struct kimage_arch { unsigned long systable_ptr; }; +#ifdef CONFIG_KEXEC_FILE +extern const struct kexec_file_ops kexec_efi_ops; +extern const struct kexec_file_ops kexec_elf_ops; + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + +extern int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, unsigned long kernel_size, + char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len); +#endif + typedef void (*do_kexec_t)(unsigned long efi_boot, unsigned long cmdline_ptr, unsigned long systable_ptr, diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h index 099bafc6f797..e36cc7e8ed20 100644 --- a/arch/loongarch/include/asm/kvm_mmu.h +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -16,6 +16,13 @@ */ #define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) +/* + * _PAGE_MODIFIED is a software PTE bit: on the host kernel it records + * whether the page has ever been written, while on the secondary MMU it + * records the page writeable attribute, for fast-path handling.
+ */ +#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED + #define _KVM_FLUSH_PGTABLE 0x1 #define _KVM_HAS_PGMASK 0x2 #define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) @@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) WRITE_ONCE(*ptep, val); } -static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } -static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; } +static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; } static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) { @@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) { - return pte | _PAGE_DIRTY; + return pte | __WRITEABLE; } static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) { - return pte & ~_PAGE_DIRTY; + return pte & ~__WRITEABLE; } static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) @@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) return pte & ~_PAGE_HUGE; } +static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte) +{ + return pte | KVM_PAGE_WRITEABLE; +} + static inline int kvm_need_flush(kvm_ptw_ctx *ctx) { return ctx->flag & _KVM_FLUSH_PGTABLE; diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h index e6df6a4c1c70..7f33a3039272 100644 --- a/arch/loongarch/include/asm/kvm_pch_pic.h +++ b/arch/loongarch/include/asm/kvm_pch_pic.h @@ -34,13 +34,26 @@ #define PCH_PIC_INT_ISR_END 0x3af #define PCH_PIC_POLARITY_START 0x3e0 #define PCH_PIC_POLARITY_END 0x3e7 -#define PCH_PIC_INT_ID_VAL 0x7000000UL +#define PCH_PIC_INT_ID_VAL 0x7UL #define PCH_PIC_INT_ID_VER 0x1UL +union pch_pic_id { + struct { + uint8_t reserved_0[3]; + uint8_t id; + uint8_t version; + uint8_t reserved_1; + uint8_t irq_num; + uint8_t reserved_2; + } desc; + uint64_t data; +}; + struct loongarch_pch_pic { spinlock_t lock; struct kvm *kvm; struct kvm_io_device device; + union pch_pic_id id; uint64_t mask; /* 1:disable irq, 0:enable irq */ uint64_t htmsi_en; /* 1:msi */ uint64_t edge; /* 1:edge triggered, 0:level triggered */ diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 3eda298702b1..5cb568a60cf8 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -58,7 +58,7 @@ .endm .macro STACKLEAK_ERASE -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +#ifdef CONFIG_KSTACK_ERASE bl stackleak_erase_on_task_stack #endif .endm diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 9dfa2ef00816..4d7117fcdc78 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -65,50 +65,42 @@ register unsigned long current_stack_pointer __asm__("$sp"); * access * - pending work-to-be-done flags are in LSW * - other flags in MSW + * + * Tell the generic TIF infrastructure which special bits loongarch supports */ -#define TIF_NEED_RESCHED 0 /* rescheduling necessary */ -#define TIF_NEED_RESCHED_LAZY 1 /* lazy rescheduling necessary */ -#define TIF_SIGPENDING 2 /* signal pending */ -#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */ -#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ -#define 
TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ -#define TIF_NOHZ 6 /* in adaptive nohz mode */ -#define TIF_UPROBE 7 /* breakpointed or singlestepping */ -#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */ -#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */ -#define TIF_MEMDIE 10 /* is terminating due to OOM killer */ -#define TIF_FIXADE 11 /* Fix address errors in software */ -#define TIF_LOGADE 12 /* Log address errors to syslog */ -#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */ -#define TIF_32BIT_ADDR 14 /* 32-bit address space */ -#define TIF_LOAD_WATCH 15 /* If set, load watch registers */ -#define TIF_SINGLESTEP 16 /* Single Step */ -#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */ -#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */ -#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */ -#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */ -#define TIF_PATCH_PENDING 21 /* pending live patching update */ +#define HAVE_TIF_NEED_RESCHED_LAZY +#define HAVE_TIF_RESTORE_SIGMASK + +#include <asm-generic/thread_info_tif.h> + +/* Architecture specific bits */ +#define TIF_NOHZ 16 /* in adaptive nohz mode */ +#define TIF_USEDFPU 17 /* FPU was used by this task this quantum (SMP) */ +#define TIF_USEDSIMD 18 /* SIMD has been used this quantum */ +#define TIF_FIXADE 19 /* Fix address errors in software */ +#define TIF_LOGADE 20 /* Log address errors to syslog */ +#define TIF_32BIT_REGS 21 /* 32-bit general purpose registers */ +#define TIF_32BIT_ADDR 22 /* 32-bit address space */ +#define TIF_LOAD_WATCH 23 /* If set, load watch registers */ +#define TIF_SINGLESTEP 24 /* Single Step */ +#define TIF_LSX_CTX_LIVE 25 /* LSX context must be preserved */ +#define TIF_LASX_CTX_LIVE 26 /* LASX context must be preserved */ +#define TIF_USEDLBT 27 /* LBT was used by this task this quantum (SMP) */ +#define TIF_LBT_CTX_LIVE 28 /* LBT context must be preserved */ -#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) -#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) -#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) -#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) -#define _TIF_NOHZ (1<<TIF_NOHZ) -#define _TIF_UPROBE (1<<TIF_UPROBE) -#define _TIF_USEDFPU (1<<TIF_USEDFPU) -#define _TIF_USEDSIMD (1<<TIF_USEDSIMD) -#define _TIF_FIXADE (1<<TIF_FIXADE) -#define _TIF_LOGADE (1<<TIF_LOGADE) -#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) -#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) -#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) -#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) -#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE) -#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE) -#define _TIF_USEDLBT (1<<TIF_USEDLBT) -#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE) -#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING) +#define _TIF_NOHZ BIT(TIF_NOHZ) +#define _TIF_USEDFPU BIT(TIF_USEDFPU) +#define _TIF_USEDSIMD BIT(TIF_USEDSIMD) +#define _TIF_FIXADE BIT(TIF_FIXADE) +#define _TIF_LOGADE BIT(TIF_LOGADE) +#define _TIF_32BIT_REGS BIT(TIF_32BIT_REGS) +#define _TIF_32BIT_ADDR BIT(TIF_32BIT_ADDR) +#define _TIF_LOAD_WATCH BIT(TIF_LOAD_WATCH) +#define _TIF_SINGLESTEP BIT(TIF_SINGLESTEP) +#define _TIF_LSX_CTX_LIVE BIT(TIF_LSX_CTX_LIVE) +#define _TIF_LASX_CTX_LIVE BIT(TIF_LASX_CTX_LIVE) +#define _TIF_USEDLBT BIT(TIF_USEDLBT) +#define _TIF_LBT_CTX_LIVE BIT(TIF_LBT_CTX_LIVE) #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */ diff --git 
a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 5f354f5c6847..57ba1a563bb1 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -103,6 +103,7 @@ struct kvm_fpu { #define KVM_LOONGARCH_VM_FEAT_PMU 5 #define KVM_LOONGARCH_VM_FEAT_PV_IPI 6 #define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 +#define KVM_LOONGARCH_VM_FEAT_PTW 8 /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 diff --git a/arch/loongarch/include/uapi/asm/setup.h b/arch/loongarch/include/uapi/asm/setup.h new file mode 100644 index 000000000000..d46363ce3e02 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/setup.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_LOONGARCH_SETUP_H +#define _UAPI_ASM_LOONGARCH_SETUP_H + +#define COMMAND_LINE_SIZE 4096 + +#endif /* _UAPI_ASM_LOONGARCH_SETUP_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 6f5a4574a911..001924877772 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o obj-$(CONFIG_RELOCATABLE) += relocate.o obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_efi.o kexec_elf.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index db1e4bb26b6a..3017c7157600 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -4,6 +4,8 @@ * * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#define COMPILE_OFFSETS + #include <linux/types.h> #include <linux/sched.h> #include <linux/mm.h> diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index fedaa67cde41..cbfce2872d71 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -52,6 +52,48 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c) c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask; } +/* simd = -1/0/128/256 */ +static unsigned int simd = -1U; + +static int __init cpu_setup_simd(char *str) +{ + get_option(&str, &simd); + pr_info("Set SIMD width = %u\n", simd); + + return 0; +} + +early_param("simd", cpu_setup_simd); + +static int __init cpu_final_simd(void) +{ + struct cpuinfo_loongarch *c = &cpu_data[0]; + + if (simd < 128) { + c->options &= ~LOONGARCH_CPU_LSX; + elf_hwcap &= ~HWCAP_LOONGARCH_LSX; + } + + if (simd < 256) { + c->options &= ~LOONGARCH_CPU_LASX; + elf_hwcap &= ~HWCAP_LOONGARCH_LASX; + } + + simd = 0; + + if (c->options & LOONGARCH_CPU_LSX) + simd = 128; + + if (c->options & LOONGARCH_CPU_LASX) + simd = 256; + + pr_info("Final SIMD width = %u\n", simd); + + return 0; +} + +arch_initcall(cpu_final_simd); + static inline void set_elf_platform(int cpu, const char *plat) { if (cpu == 0) @@ -134,13 +176,13 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) elf_hwcap |= HWCAP_LOONGARCH_FPU; } #ifdef CONFIG_CPU_HAS_LSX - if (config & CPUCFG2_LSX) { + if ((config & CPUCFG2_LSX) && (simd >= 128)) { c->options |= LOONGARCH_CPU_LSX; elf_hwcap |= HWCAP_LOONGARCH_LSX; } #endif #ifdef CONFIG_CPU_HAS_LASX - if (config & CPUCFG2_LASX) { + if ((config & CPUCFG2_LASX) && (simd >= 256)) { c->options |= LOONGARCH_CPU_LASX; elf_hwcap |= HWCAP_LOONGARCH_LASX; } diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 
c0a5dc9aeae2..23bd5ae2212c 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -86,7 +86,7 @@ late_initcall(fdt_cpu_clk_init); static ssize_t boardinfo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, + return sysfs_emit(buf, "BIOS Information\n" "Vendor\t\t\t: %s\n" "Version\t\t\t: %s\n" @@ -109,6 +109,8 @@ static int __init boardinfo_init(void) struct kobject *loongson_kobj; loongson_kobj = kobject_create_and_add("loongson", firmware_kobj); + if (!loongson_kobj) + return -ENOMEM; return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr); } diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index 72ecfed29d55..bf037f0c6b26 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -141,6 +141,9 @@ bool insns_not_supported(union loongarch_instruction insn) case amswapw_op ... ammindbdu_op: pr_notice("atomic memory access instructions are not supported\n"); return true; + case scq_op: + pr_notice("sc.q instruction is not supported\n"); + return true; } switch (insn.reg2i14_format.opcode) { @@ -152,6 +155,15 @@ bool insns_not_supported(union loongarch_instruction insn) return true; } + switch (insn.reg2_format.opcode) { + case llacqw_op: + case llacqd_op: + case screlw_op: + case screld_op: + pr_notice("llacq and screl instructions are not supported\n"); + return true; + } + switch (insn.reg1i21_format.opcode) { case bceqz_op: pr_notice("bceqz and bcnez instructions are not supported\n"); diff --git a/arch/loongarch/kernel/kexec_efi.c b/arch/loongarch/kernel/kexec_efi.c new file mode 100644 index 000000000000..45121b914f8f --- /dev/null +++ b/arch/loongarch/kernel/kexec_efi.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Load EFI vmlinux file for the kexec_file_load syscall. + * + * Author: Youling Tang <tangyouling@kylinos.cn> + * Copyright (C) 2025 KylinSoft Corporation. 
+ */ + +#define pr_fmt(fmt) "kexec_file(EFI): " fmt + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/pe.h> +#include <linux/string.h> +#include <asm/byteorder.h> +#include <asm/cpufeature.h> +#include <asm/image.h> + +static int efi_kexec_probe(const char *kernel_buf, unsigned long kernel_len) +{ + const struct loongarch_image_header *h = (const struct loongarch_image_header *)kernel_buf; + + if (!h || (kernel_len < sizeof(*h))) { + kexec_dprintk("No LoongArch image header.\n"); + return -EINVAL; + } + + if (!loongarch_header_check_dos_sig(h)) { + kexec_dprintk("No LoongArch PE image header.\n"); + return -EINVAL; + } + + return 0; +} + +static void *efi_kexec_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + int ret; + unsigned long text_offset, kernel_segment_number; + struct kexec_buf kbuf; + struct kexec_segment *kernel_segment; + struct loongarch_image_header *h; + + h = (struct loongarch_image_header *)kernel; + if (!h->kernel_asize) + return ERR_PTR(-EINVAL); + + /* + * Load the kernel + * FIXME: Non-relocatable kernel rejected for kexec_file (require CONFIG_RELOCATABLE) + */ + kbuf.image = image; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = false; + + kbuf.buffer = kernel; + kbuf.bufsz = kernel_len; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = le64_to_cpu(h->kernel_asize); + text_offset = le64_to_cpu(h->text_offset); + kbuf.buf_min = text_offset; + kbuf.buf_align = SZ_2M; + + kernel_segment_number = image->nr_segments; + + /* + * The location of the kernel segment may make it impossible to + * satisfy the other segment requirements, so we try repeatedly + * to find a location that will work. + */ + while ((ret = kexec_add_buffer(&kbuf)) == 0) { + /* Try to load additional data */ + kernel_segment = &image->segment[kernel_segment_number]; + ret = load_other_segments(image, kernel_segment->mem, + kernel_segment->memsz, initrd, + initrd_len, cmdline, cmdline_len); + if (!ret) + break; + + /* + * We couldn't find space for the other segments; erase the + * kernel segment and try the next available hole. + */ + image->nr_segments -= 1; + kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + } + + if (ret < 0) { + pr_err("Could not find any suitable kernel location!"); + return ERR_PTR(ret); + } + + kernel_segment = &image->segment[kernel_segment_number]; + + /* Make sure the second kernel jumps to the correct "kernel_entry" */ + image->start = kernel_segment->mem + h->kernel_entry - text_offset; + + kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz); + + return NULL; +} + +const struct kexec_file_ops kexec_efi_ops = { + .probe = efi_kexec_probe, + .load = efi_kexec_load, +}; diff --git a/arch/loongarch/kernel/kexec_elf.c b/arch/loongarch/kernel/kexec_elf.c new file mode 100644 index 000000000000..97b2f049801a --- /dev/null +++ b/arch/loongarch/kernel/kexec_elf.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Load ELF vmlinux file for the kexec_file_load syscall. + * + * Author: Youling Tang <tangyouling@kylinos.cn> + * Copyright (C) 2025 KylinSoft Corporation. 
+ */ + +#define pr_fmt(fmt) "kexec_file(ELF): " fmt + +#include <linux/elf.h> +#include <linux/kexec.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/memblock.h> +#include <asm/setup.h> + +#define elf_kexec_probe kexec_elf_probe + +static int _elf_kexec_load(struct kimage *image, + struct elfhdr *ehdr, struct kexec_elf_info *elf_info, + struct kexec_buf *kbuf, unsigned long *text_offset) +{ + int i, ret = -1; + + /* Read in the PT_LOAD segments. */ + for (i = 0; i < ehdr->e_phnum; i++) { + size_t size; + const struct elf_phdr *phdr; + + phdr = &elf_info->proghdrs[i]; + if (phdr->p_type != PT_LOAD) + continue; + + size = phdr->p_filesz; + if (size > phdr->p_memsz) + size = phdr->p_memsz; + + kbuf->buffer = (void *)elf_info->buffer + phdr->p_offset; + kbuf->bufsz = size; + kbuf->buf_align = phdr->p_align; + *text_offset = __pa(phdr->p_paddr); + kbuf->buf_min = *text_offset; + kbuf->memsz = ALIGN(phdr->p_memsz, SZ_64K); + kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; + ret = kexec_add_buffer(kbuf); + if (ret < 0) + break; + } + + return ret; +} + +static void *elf_kexec_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + int ret; + unsigned long text_offset, kernel_segment_number; + struct elfhdr ehdr; + struct kexec_buf kbuf; + struct kexec_elf_info elf_info; + struct kexec_segment *kernel_segment; + + ret = kexec_build_elf_info(kernel, kernel_len, &ehdr, &elf_info); + if (ret < 0) + return ERR_PTR(ret); + + /* + * Load the kernel + * FIXME: Non-relocatable kernel rejected for kexec_file (require CONFIG_RELOCATABLE) + */ + kbuf.image = image; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = false; + + kernel_segment_number = image->nr_segments; + + ret = _elf_kexec_load(image, &ehdr, &elf_info, &kbuf, &text_offset); + if (ret < 0) + goto out; + + /* Load additional data */ + kernel_segment = &image->segment[kernel_segment_number]; + ret = load_other_segments(image, kernel_segment->mem, kernel_segment->memsz, + initrd, initrd_len, cmdline, cmdline_len); + if (ret < 0) + goto out; + + /* Make sure the second kernel jumps to the correct "kernel_entry". */ + image->start = kernel_segment->mem + __pa(ehdr.e_entry) - text_offset; + + kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz); + +out: + kexec_free_elf_info(&elf_info); + return ret ? ERR_PTR(ret) : NULL; +} + +const struct kexec_file_ops kexec_elf_ops = { + .probe = elf_kexec_probe, + .load = elf_kexec_load, +}; diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index f9381800e291..e4b2bbc47e62 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -70,18 +70,28 @@ int machine_kexec_prepare(struct kimage *kimage) kimage->arch.efi_boot = fw_arg0; kimage->arch.systable_ptr = fw_arg2; - /* Find the command line */ - for (i = 0; i < kimage->nr_segments; i++) { - if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { - if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) - kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; - break; + if (kimage->file_mode == 1) { + /* + * kimage->cmdline_buf will be released in kexec_file_load, so copy + * to the KEXEC_CMDLINE_ADDR safe area. 
+ */ + memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr, + strlen((char *)kimage->arch.cmdline_ptr) + 1); + kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR; + } else { + /* Find the command line */ + for (i = 0; i < kimage->nr_segments; i++) { + if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + break; + } } - } - if (!kimage->arch.cmdline_ptr) { - pr_err("Command line not included in the provided image\n"); - return -EINVAL; + if (!kimage->arch.cmdline_ptr) { + pr_err("Command line not included in the provided image\n"); + return -EINVAL; + } } /* kexec/kdump need a safe page to save reboot_code_buffer */ @@ -287,9 +297,10 @@ void machine_kexec(struct kimage *image) /* We do not want to be bothered. */ local_irq_disable(); - pr_notice("EFI boot flag 0x%lx\n", efi_boot); - pr_notice("Command line at 0x%lx\n", cmdline_ptr); - pr_notice("System table at 0x%lx\n", systable_ptr); + pr_notice("EFI boot flag: 0x%lx\n", efi_boot); + pr_notice("Command line addr: 0x%lx\n", cmdline_ptr); + pr_notice("Command line string: %s\n", (char *)cmdline_ptr); + pr_notice("System table addr: 0x%lx\n", systable_ptr); pr_notice("We will call new kernel at 0x%lx\n", start_addr); pr_notice("Bye ...\n"); diff --git a/arch/loongarch/kernel/machine_kexec_file.c b/arch/loongarch/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..dda236b51a88 --- /dev/null +++ b/arch/loongarch/kernel/machine_kexec_file.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * kexec_file for LoongArch + * + * Author: Youling Tang <tangyouling@kylinos.cn> + * Copyright (C) 2025 KylinSoft Corporation. + * + * Most code is derived from LoongArch port of kexec-tools + */ + +#define pr_fmt(fmt) "kexec_file: " fmt + +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/memblock.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <asm/bootinfo.h> + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &kexec_efi_ops, + &kexec_elf_ops, + NULL +}; + +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; + + return kexec_image_post_load_cleanup_default(image); +} + +/* Add the "kexec_file" command line parameter to command line. */ +static void cmdline_add_loader(unsigned long *cmdline_tmplen, char *modified_cmdline) +{ + int loader_strlen; + + loader_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "kexec_file "); + *cmdline_tmplen += loader_strlen; +} + +/* Add the "initrd=start,size" command line parameter to command line. 
*/ +static void cmdline_add_initrd(struct kimage *image, unsigned long *cmdline_tmplen, + char *modified_cmdline, unsigned long initrd) +{ + int initrd_strlen; + + initrd_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "initrd=0x%lx,0x%lx ", + initrd, image->initrd_buf_len); + *cmdline_tmplen += initrd_strlen; +} + +#ifdef CONFIG_CRASH_DUMP + +static int prepare_elf_headers(void **addr, unsigned long *sz) +{ + int ret, nr_ranges; + uint64_t i; + phys_addr_t start, end; + struct crash_mem *cmem; + + nr_ranges = 2; /* for exclusion of crashkernel region */ + for_each_mem_range(i, &start, &end) + nr_ranges++; + + cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); + if (!cmem) + return -ENOMEM; + + cmem->max_nr_ranges = nr_ranges; + cmem->nr_ranges = 0; + for_each_mem_range(i, &start, &end) { + cmem->ranges[cmem->nr_ranges].start = start; + cmem->ranges[cmem->nr_ranges].end = end - 1; + cmem->nr_ranges++; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); + if (ret < 0) + goto out; + + if (crashk_low_res.end) { + ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); + if (ret < 0) + goto out; + } + + ret = crash_prepare_elf64_headers(cmem, true, addr, sz); + +out: + kfree(cmem); + return ret; +} + +/* + * Add the "mem=size@start" command line parameter to command line, indicating the + * memory region the new kernel can use to boot into. + */ +static void cmdline_add_mem(unsigned long *cmdline_tmplen, char *modified_cmdline) +{ + int mem_strlen = 0; + + mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ", + crashk_res.end - crashk_res.start + 1, crashk_res.start); + *cmdline_tmplen += mem_strlen; + + if (crashk_low_res.end) { + mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ", + crashk_low_res.end - crashk_low_res.start + 1, crashk_low_res.start); + *cmdline_tmplen += mem_strlen; + } +} + +/* Add the "elfcorehdr=size@start" command line parameter to command line. */ +static void cmdline_add_elfcorehdr(struct kimage *image, unsigned long *cmdline_tmplen, + char *modified_cmdline, unsigned long elfcorehdr_sz) +{ + int elfcorehdr_strlen = 0; + + elfcorehdr_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "elfcorehdr=0x%lx@0x%lx ", + elfcorehdr_sz, image->elf_load_addr); + *cmdline_tmplen += elfcorehdr_strlen; +} + +#endif + +/* + * Try to add the initrd to the image. If it is not possible to find valid + * locations, this function will undo changes to the image and return non zero. 
+ */ +int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, unsigned long kernel_size, + char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len) +{ + int ret = 0; + unsigned long cmdline_tmplen = 0; + unsigned long initrd_load_addr = 0; + unsigned long orig_segments = image->nr_segments; + char *modified_cmdline = NULL; + struct kexec_buf kbuf; + + kbuf.image = image; + /* Don't allocate anything below the kernel */ + kbuf.buf_min = kernel_load_addr + kernel_size; + + modified_cmdline = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); + if (!modified_cmdline) + return -EINVAL; + + cmdline_add_loader(&cmdline_tmplen, modified_cmdline); + /* Ensure it's null terminated */ + modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0'; + +#ifdef CONFIG_CRASH_DUMP + /* Load elf core header */ + if (image->type == KEXEC_TYPE_CRASH) { + void *headers; + unsigned long headers_sz; + + ret = prepare_elf_headers(&headers, &headers_sz); + if (ret < 0) { + pr_err("Preparing elf core header failed\n"); + goto out_err; + } + + kbuf.buffer = headers; + kbuf.bufsz = headers_sz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = headers_sz; + kbuf.buf_align = SZ_64K; /* largest supported page size */ + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = true; + + ret = kexec_add_buffer(&kbuf); + if (ret < 0) { + vfree(headers); + goto out_err; + } + image->elf_headers = headers; + image->elf_load_addr = kbuf.mem; + image->elf_headers_sz = headers_sz; + + kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + + /* Add the mem=size@start parameter to the command line */ + cmdline_add_mem(&cmdline_tmplen, modified_cmdline); + + /* Add the elfcorehdr=size@start parameter to the command line */ + cmdline_add_elfcorehdr(image, &cmdline_tmplen, modified_cmdline, headers_sz); + } +#endif + + /* Load initrd */ + if (initrd) { + kbuf.buffer = initrd; + kbuf.bufsz = initrd_len; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = initrd_len; + kbuf.buf_align = 0; + /* within 1GB-aligned window of up to 32GB in size */ + kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + (unsigned long)SZ_1G * 32; + kbuf.top_down = false; + + ret = kexec_add_buffer(&kbuf); + if (ret < 0) + goto out_err; + initrd_load_addr = kbuf.mem; + + kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + initrd_load_addr, kbuf.bufsz, kbuf.memsz); + + /* Add the initrd=start,size parameter to the command line */ + cmdline_add_initrd(image, &cmdline_tmplen, modified_cmdline, initrd_load_addr); + } + + if (cmdline_len + cmdline_tmplen > COMMAND_LINE_SIZE) { + pr_err("Appending command line exceeds COMMAND_LINE_SIZE\n"); + ret = -EINVAL; + goto out_err; + } + + memcpy(modified_cmdline + cmdline_tmplen, cmdline, cmdline_len); + cmdline = modified_cmdline; + image->arch.cmdline_ptr = (unsigned long)cmdline; + + return 0; + +out_err: + image->nr_segments = orig_segments; + kfree(modified_cmdline); + return ret; +} diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c index e2f30ff9afde..a43ba7f9f987 100644 --- a/arch/loongarch/kernel/module-sections.c +++ b/arch/loongarch/kernel/module-sections.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/ftrace.h> +#include <linux/sort.h> Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val) { @@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v return 
(Elf_Addr)&plt[nr]; } -static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) -{ - return x->r_info == y->r_info && x->r_addend == y->r_addend; -} +#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b)) -static bool duplicate_rela(const Elf_Rela *rela, int idx) +static int compare_rela(const void *x, const void *y) { - int i; + int ret; + const Elf_Rela *rela_x = x, *rela_y = y; - for (i = 0; i < idx; i++) { - if (is_rela_equal(&rela[i], &rela[idx])) - return true; - } + ret = cmp_3way(rela_x->r_info, rela_y->r_info); + if (ret == 0) + ret = cmp_3way(rela_x->r_addend, rela_y->r_addend); - return false; + return ret; } static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { - unsigned int i, type; + unsigned int i; + + sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL); for (i = 0; i < num; i++) { - type = ELF_R_TYPE(relas[i].r_info); - switch (type) { + if (i && !compare_rela(&relas[i-1], &relas[i])) + continue; + + switch (ELF_R_TYPE(relas[i].r_info)) { case R_LARCH_SOP_PUSH_PLT_PCREL: case R_LARCH_B26: - if (!duplicate_rela(relas, i)) - (*plts)++; + (*plts)++; break; case R_LARCH_GOT_PC_HI20: - if (!duplicate_rela(relas, i)) - (*gots)++; + (*gots)++; break; default: break; /* Do nothing. */ diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 3582f591bab2..efd9edf65603 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -167,7 +167,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) unsigned long childksp; unsigned long tls = args->tls; unsigned long usp = args->stack; - unsigned long clone_flags = args->flags; + u64 clone_flags = args->flags; struct pt_regs *childregs, *regs = current_pt_regs(); childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE; diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 50c469067f3a..b5e2312a2fca 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled(void) return true; #endif + str = strstr(boot_command_line, "kexec_file"); + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + return true; + return false; } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 075b79b2c1d3..69c17d162fff 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -355,6 +355,7 @@ void __init platform_init(void) #ifdef CONFIG_ACPI acpi_table_upgrade(); + acpi_gbl_use_global_lock = false; acpi_gbl_use_default_register_widths = false; acpi_boot_table_init(); #endif diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c index 4740cb5b2388..c9f7ca778364 100644 --- a/arch/loongarch/kernel/signal.c +++ b/arch/loongarch/kernel/signal.c @@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, for (i = 1; i < 32; i++) err |= __put_user(regs->regs[i], &sc->sc_regs[i]); +#ifdef CONFIG_CPU_HAS_LBT + if (extctx->lbt.addr) + err |= protected_save_lbt_context(extctx); +#endif + if (extctx->lasx.addr) err |= protected_save_lasx_context(extctx); else if (extctx->lsx.addr) @@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, else if (extctx->fpu.addr) err |= protected_save_fpu_context(extctx); -#ifdef CONFIG_CPU_HAS_LBT - if (extctx->lbt.addr) - err |= protected_save_lbt_context(extctx); -#endif - /* Set the "end" magic */ info = 
(struct sctx_info *)extctx->end.addr; err |= __put_user(0, &info->magic); diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index 9a038d1070d7..387dc4d3c486 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (task == current) { regs->regs[3] = (unsigned long)__builtin_frame_address(0); regs->csr_era = (unsigned long)__builtin_return_address(0); + regs->regs[22] = 0; } else { regs->regs[3] = thread_saved_fp(task); regs->csr_era = thread_saved_ra(task); + regs->regs[22] = task->thread.reg22; } regs->regs[1] = 0; - regs->regs[22] = 0; for (unwind_start(&state, task, regs); !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 367906b10f81..6fb92cc1a4c9 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -5,6 +5,7 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include <linux/clockchips.h> +#include <linux/cpuhotplug.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/init.h> @@ -102,6 +103,21 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev return 0; } +static int arch_timer_starting(unsigned int cpu) +{ + set_csr_ecfg(ECFGF_TIMER); + + return 0; +} + +static int arch_timer_dying(unsigned int cpu) +{ + /* Clear Timer Interrupt */ + write_csr_tintclear(CSR_TINTCLR_TI); + + return 0; +} + static unsigned long get_loops_per_jiffy(void) { unsigned long lpj = (unsigned long)const_clock_freq; @@ -172,6 +188,10 @@ int constant_clockevent_init(void) lpj_fine = get_loops_per_jiffy(); pr_info("Constant clock event device register\n"); + cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING, + "clockevents/loongarch/timer:starting", + arch_timer_starting, arch_timer_dying); + return 0; } diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index 7b888d9085a0..dee1a15d7f4c 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -54,6 +54,9 @@ static int __init init_vdso(void) vdso_info.code_mapping.pages = kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL); + if (!vdso_info.code_mapping.pages) + return -ENOMEM; + pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso)); for (i = 0; i < vdso_info.size / PAGE_SIZE; i++) vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i); diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 2ce41f93b2a4..cb493980d874 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -218,16 +218,16 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) } trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val); } else { + vcpu->arch.io_gpr = rd; /* Set register id for iocsr read completion */ idx = srcu_read_lock(&vcpu->kvm->srcu); - ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val); + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, + run->iocsr_io.len, run->iocsr_io.data); srcu_read_unlock(&vcpu->kvm->srcu, idx); - if (ret == 0) + if (ret == 0) { + kvm_complete_iocsr_read(vcpu, run); ret = EMULATE_DONE; - else { + } else ret = EMULATE_DO_IOCSR; - /* Save register id for iocsr read completion */ - vcpu->arch.io_gpr = rd; - } trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL); } @@ -468,6 +468,8 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, 
larch_inst inst) if (ret == EMULATE_DO_MMIO) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL); + vcpu->arch.io_gpr = rd; /* Set for kvm_complete_mmio_read() use */ + /* * If mmio device such as PCH-PIC is emulated in KVM, * it need not return to user space to handle the mmio @@ -475,16 +477,15 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) */ idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, - run->mmio.len, &vcpu->arch.gprs[rd]); + run->mmio.len, run->mmio.data); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (!ret) { + kvm_complete_mmio_read(vcpu, run); update_pc(&vcpu->arch); vcpu->mmio_needed = 0; return EMULATE_DONE; } - /* Set for kvm_complete_mmio_read() use */ - vcpu->arch.io_gpr = rd; run->mmio.is_write = 0; vcpu->mmio_is_write = 0; return EMULATE_DO_MMIO; @@ -778,10 +779,8 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) return 0; default: return KVM_HCALL_INVALID_CODE; - }; - - return KVM_HCALL_INVALID_CODE; -}; + } +} /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c index a3a12af9ecbf..c32333695381 100644 --- a/arch/loongarch/kvm/intc/eiointc.c +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -45,7 +45,12 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level) } cpu = s->sw_coremap[irq]; - vcpu = kvm_get_vcpu(s->kvm, cpu); + vcpu = kvm_get_vcpu_by_id(s->kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + if (level) { /* if not enable return false */ if (!test_bit(irq, (unsigned long *)s->enable.reg_u32)) @@ -421,21 +426,26 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, struct loongarch_eiointc *s = dev->kvm->arch.eiointc; data = (void __user *)attr->addr; - spin_lock_irqsave(&s->lock, flags); switch (type) { case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: if (copy_from_user(&val, data, 4)) - ret = -EFAULT; - else { - if (val >= EIOINTC_ROUTE_MAX_VCPUS) - ret = -EINVAL; - else - s->num_cpu = val; - } + return -EFAULT; + break; + default: + break; + } + + spin_lock_irqsave(&s->lock, flags); + switch (type) { + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + if (val >= EIOINTC_ROUTE_MAX_VCPUS) + ret = -EINVAL; + else + s->num_cpu = val; break; case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: - if (copy_from_user(&s->features, data, 4)) - ret = -EFAULT; + s->features = val; if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION))) s->status |= BIT(EIOINTC_ENABLE); break; @@ -457,19 +467,17 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, static int kvm_eiointc_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, - bool is_write) + bool is_write, int *data) { int addr, cpu, offset, ret = 0; unsigned long flags; void *p = NULL; - void __user *data; struct loongarch_eiointc *s; s = dev->kvm->arch.eiointc; addr = attr->attr; cpu = addr >> 16; addr &= 0xffff; - data = (void __user *)attr->addr; switch (addr) { case EIOINTC_NODETYPE_START ... 
EIOINTC_NODETYPE_END: offset = (addr - EIOINTC_NODETYPE_START) / 4; @@ -508,13 +516,10 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, } spin_lock_irqsave(&s->lock, flags); - if (is_write) { - if (copy_from_user(p, data, 4)) - ret = -EFAULT; - } else { - if (copy_to_user(data, p, 4)) - ret = -EFAULT; - } + if (is_write) + memcpy(p, data, 4); + else + memcpy(data, p, 4); spin_unlock_irqrestore(&s->lock, flags); return ret; @@ -522,19 +527,17 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, static int kvm_eiointc_sw_status_access(struct kvm_device *dev, struct kvm_device_attr *attr, - bool is_write) + bool is_write, int *data) { int addr, ret = 0; unsigned long flags; void *p = NULL; - void __user *data; struct loongarch_eiointc *s; s = dev->kvm->arch.eiointc; addr = attr->attr; addr &= 0xffff; - data = (void __user *)attr->addr; switch (addr) { case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: if (is_write) @@ -556,13 +559,10 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, return -EINVAL; } spin_lock_irqsave(&s->lock, flags); - if (is_write) { - if (copy_from_user(p, data, 4)) - ret = -EFAULT; - } else { - if (copy_to_user(data, p, 4)) - ret = -EFAULT; - } + if (is_write) + memcpy(p, data, 4); + else + memcpy(data, p, 4); spin_unlock_irqrestore(&s->lock, flags); return ret; @@ -571,11 +571,27 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, static int kvm_eiointc_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + int ret, data; + switch (attr->group) { case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: - return kvm_eiointc_regs_access(dev, attr, false); + ret = kvm_eiointc_regs_access(dev, attr, false, &data); + if (ret) + return ret; + + if (copy_to_user((void __user *)attr->addr, &data, 4)) + ret = -EFAULT; + + return ret; case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: - return kvm_eiointc_sw_status_access(dev, attr, false); + ret = kvm_eiointc_sw_status_access(dev, attr, false, &data); + if (ret) + return ret; + + if (copy_to_user((void __user *)attr->addr, &data, 4)) + ret = -EFAULT; + + return ret; default: return -EINVAL; } @@ -584,13 +600,21 @@ static int kvm_eiointc_get_attr(struct kvm_device *dev, static int kvm_eiointc_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + int data; + switch (attr->group) { case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL: return kvm_eiointc_ctrl_access(dev, attr); case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: - return kvm_eiointc_regs_access(dev, attr, true); + if (copy_from_user(&data, (void __user *)attr->addr, 4)) + return -EFAULT; + + return kvm_eiointc_regs_access(dev, attr, true, &data); case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: - return kvm_eiointc_sw_status_access(dev, attr, true); + if (copy_from_user(&data, (void __user *)attr->addr, 4)) + return -EFAULT; + + return kvm_eiointc_sw_status_access(dev, attr, true, &data); default: return -EINVAL; } diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index e658d5b37c04..05cefd29282e 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -7,13 +7,26 @@ #include <asm/kvm_ipi.h> #include <asm/kvm_vcpu.h> -static void ipi_send(struct kvm *kvm, uint64_t data) +static void ipi_set(struct kvm_vcpu *vcpu, uint32_t data) { - int cpu, action; uint32_t status; - struct kvm_vcpu *vcpu; struct kvm_interrupt irq; + spin_lock(&vcpu->arch.ipi_state.lock); + status = vcpu->arch.ipi_state.status; + vcpu->arch.ipi_state.status |= data; + spin_unlock(&vcpu->arch.ipi_state.lock); + if ((status == 
0) && data) { + irq.irq = LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } +} + +static void ipi_send(struct kvm *kvm, uint64_t data) +{ + int cpu; + struct kvm_vcpu *vcpu; + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); if (unlikely(vcpu == NULL)) { @@ -21,15 +34,7 @@ static void ipi_send(struct kvm *kvm, uint64_t data) return; } - action = BIT(data & 0x1f); - spin_lock(&vcpu->arch.ipi_state.lock); - status = vcpu->arch.ipi_state.status; - vcpu->arch.ipi_state.status |= action; - spin_unlock(&vcpu->arch.ipi_state.lock); - if (status == 0) { - irq.irq = LARCH_INT_IPI; - kvm_vcpu_ioctl_interrupt(vcpu, &irq); - } + ipi_set(vcpu, BIT(data & 0x1f)); } static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data) @@ -96,10 +101,38 @@ static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int spin_unlock(&vcpu->arch.ipi_state.lock); } +static int mail_send(struct kvm *kvm, uint64_t data) +{ + int i, cpu, mailbox, offset; + uint32_t val = 0, mask = 0; + struct kvm_vcpu *vcpu; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + mailbox = ((data & 0xffffffff) >> 2) & 0x7; + offset = IOCSR_IPI_BUF_20 + mailbox * 4; + if ((data >> 27) & 0xf) { + val = read_mailbox(vcpu, offset, 4); + for (i = 0; i < 4; i++) + if (data & (BIT(27 + i))) + mask |= (0xff << (i * 8)); + val &= mask; + } + + val |= ((uint32_t)(data >> 32) & ~mask); + write_mailbox(vcpu, offset, val, 4); + + return 0; +} + static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) { int i, idx, ret; - uint32_t val = 0, mask = 0; + uint64_t val = 0, mask = 0; /* * Bit 27-30 is mask for byte writing. 
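
[Editor's note] As the comment above says, bits 27-30 of the IOCSR command act as a per-byte write mask; in mail_send() and send_ipi_data(), a set bit keeps that byte of the old 32-bit value while the replacement bytes come from bits 32-63 (and bits 16-25 select the target cpu). A standalone sketch of the decoding (apply_mailbox_write is an illustrative name):

#include <stdint.h>

/* cmd layout (from the hunk above): bits 16-25 target cpu, bits 27-30
 * per-byte mask, bits 32-63 new data. A set mask bit KEEPS that byte of
 * the old 32-bit value; clear bytes are overwritten from cmd >> 32. */
static uint32_t apply_mailbox_write(uint32_t old, uint64_t cmd)
{
        uint32_t mask = 0;
        int i;

        if ((cmd >> 27) & 0xf) {
                for (i = 0; i < 4; i++)
                        if (cmd & (1ULL << (27 + i)))
                                mask |= 0xffu << (i * 8);
        }

        return (old & mask) | ((uint32_t)(cmd >> 32) & ~mask);
}

When no mask bit is set the old value never needs to be read at all, which is why the hunk only calls read_mailbox() inside the if.
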
@@ -108,7 +141,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) if ((data >> 27) & 0xf) { /* Read the old val */ idx = srcu_read_lock(&vcpu->kvm->srcu); - ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (unlikely(ret)) { kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); @@ -124,7 +157,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) } val |= ((uint32_t)(data >> 32) & ~mask); idx = srcu_read_lock(&vcpu->kvm->srcu); - ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (unlikely(ret)) kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); @@ -132,23 +165,6 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) return ret; } -static int mail_send(struct kvm *kvm, uint64_t data) -{ - int cpu, mailbox, offset; - struct kvm_vcpu *vcpu; - - cpu = ((data & 0xffffffff) >> 16) & 0x3ff; - vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); - if (unlikely(vcpu == NULL)) { - kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); - return -EINVAL; - } - mailbox = ((data & 0xffffffff) >> 2) & 0x7; - offset = IOCSR_IPI_BASE + IOCSR_IPI_BUF_20 + mailbox * 4; - - return send_ipi_data(vcpu, offset, data); -} - static int any_send(struct kvm *kvm, uint64_t data) { int cpu, offset; @@ -231,7 +247,7 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons spin_unlock(&vcpu->arch.ipi_state.lock); break; case IOCSR_IPI_SET: - ret = -EINVAL; + ipi_set(vcpu, data); break; case IOCSR_IPI_CLEAR: /* Just clear the status of the current vcpu */ @@ -250,10 +266,10 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons ipi_send(vcpu->kvm, data); break; case IOCSR_MAIL_SEND: - ret = mail_send(vcpu->kvm, *(uint64_t *)val); + ret = mail_send(vcpu->kvm, data); break; case IOCSR_ANY_SEND: - ret = any_send(vcpu->kvm, *(uint64_t *)val); + ret = any_send(vcpu->kvm, data); break; default: kvm_err("%s: unknown addr: %llx\n", __func__, addr); @@ -298,7 +314,7 @@ static int kvm_ipi_regs_access(struct kvm_device *dev, cpu = (attr->attr >> 16) & 0x3ff; addr = attr->attr & 0xff; - vcpu = kvm_get_vcpu(dev->kvm, cpu); + vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu); if (unlikely(vcpu == NULL)) { kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); return -EINVAL; diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 6f00ffe05c54..a698a73de399 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -35,16 +35,11 @@ static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level) /* update batch irqs, the irq_mask is a bitmap of irqs */ static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level) { - int irq, bits; + unsigned int irq; + DECLARE_BITMAP(irqs, 64) = { BITMAP_FROM_U64(irq_mask) }; - /* find each irq by irqs bitmap and update each irq */ - bits = sizeof(irq_mask) * 8; - irq = find_first_bit((void *)&irq_mask, bits); - while (irq < bits) { + for_each_set_bit(irq, irqs, 64) pch_pic_update_irq(s, irq, level); - bitmap_clear((void *)&irq_mask, irq, 1); - irq = find_first_bit((void *)&irq_mask, bits); - } } /* called when a irq is triggered in pch pic */ @@ -77,109 +72,65 @@ void pch_msi_set_irq(struct kvm *kvm, int 
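
[Editor's note] The pch_pic_update_batch_irqs() rewrite just above replaces a hand-rolled find_first_bit()/bitmap_clear() cycle with a DECLARE_BITMAP() initialized via BITMAP_FROM_U64() and a single for_each_set_bit() walk. A userspace analogue of the same iteration using a compiler builtin instead of the kernel bitmap helpers:

#include <stdint.h>
#include <stdio.h>

static void update_batch_irqs(uint64_t irq_mask, int level)
{
        while (irq_mask) {
                int irq = __builtin_ctzll(irq_mask);    /* lowest set bit */

                printf("irq %d -> level %d\n", irq, level);
                irq_mask &= irq_mask - 1;               /* clear that bit */
        }
}
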
irq, int level) eiointc_set_irq(kvm->arch.eiointc, irq, level); } -/* - * pch pic register is 64-bit, but it is accessed by 32-bit, - * so we use high to get whether low or high 32 bits we want - * to read. - */ -static u32 pch_pic_read_reg(u64 *s, int high) -{ - u64 val = *s; - - /* read the high 32 bits when high is 1 */ - return high ? (u32)(val >> 32) : (u32)val; -} - -/* - * pch pic register is 64-bit, but it is accessed by 32-bit, - * so we use high to get whether low or high 32 bits we want - * to write. - */ -static u32 pch_pic_write_reg(u64 *s, int high, u32 v) -{ - u64 val = *s, data = v; - - if (high) { - /* - * Clear val high 32 bits - * Write the high 32 bits when the high is 1 - */ - *s = (val << 32 >> 32) | (data << 32); - val >>= 32; - } else - /* - * Clear val low 32 bits - * Write the low 32 bits when the high is 0 - */ - *s = (val >> 32 << 32) | v; - - return (u32)val; -} - static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) { - int offset, index, ret = 0; - u32 data = 0; - u64 int_id = 0; + int ret = 0, offset; + u64 data = 0; + void *ptemp; offset = addr - s->pch_pic_base; + offset -= offset & 7; spin_lock(&s->lock); switch (offset) { case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END: - /* int id version */ - int_id |= (u64)PCH_PIC_INT_ID_VER << 32; - /* irq number */ - int_id |= (u64)31 << (32 + 16); - /* int id value */ - int_id |= PCH_PIC_INT_ID_VAL; - *(u64 *)val = int_id; + data = s->id.data; break; case PCH_PIC_MASK_START ... PCH_PIC_MASK_END: - offset -= PCH_PIC_MASK_START; - index = offset >> 2; - /* read mask reg */ - data = pch_pic_read_reg(&s->mask, index); - *(u32 *)val = data; + data = s->mask; break; case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: - offset -= PCH_PIC_HTMSI_EN_START; - index = offset >> 2; /* read htmsi enable reg */ - data = pch_pic_read_reg(&s->htmsi_en, index); - *(u32 *)val = data; + data = s->htmsi_en; break; case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: - offset -= PCH_PIC_EDGE_START; - index = offset >> 2; /* read edge enable reg */ - data = pch_pic_read_reg(&s->edge, index); - *(u32 *)val = data; + data = s->edge; break; case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: /* we only use default mode: fixed interrupt distribution mode */ - *(u32 *)val = 0; break; case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: /* only route to int0: eiointc */ - *(u8 *)val = 1; + ptemp = s->route_entry + (offset - PCH_PIC_ROUTE_ENTRY_START); + data = *(u64 *)ptemp; break; case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: - offset -= PCH_PIC_HTMSI_VEC_START; /* read htmsi vector */ - data = s->htmsi_vector[offset]; - *(u8 *)val = data; + ptemp = s->htmsi_vector + (offset - PCH_PIC_HTMSI_VEC_START); + data = *(u64 *)ptemp; break; case PCH_PIC_POLARITY_START ... 
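
[Editor's note] The loongarch_pch_pic_read() rewrite above stores every register as an aligned u64 and derives sub-word accesses from it: the offset is rounded down to 8 bytes, the whole u64 is fetched under the lock, then the bytes below the requested address are shifted off and only len bytes are copied out. A standalone sketch of that extraction (regs and reg_read are illustrative):

#include <stdint.h>
#include <string.h>

static uint64_t regs[16];       /* illustrative backing store */

static void reg_read(uint32_t offset, int len, void *val)
{
        uint64_t data = regs[(offset & ~7u) / 8];  /* aligned 64-bit load */

        data >>= (offset & 7) * 8;  /* discard bytes below the access */
        memcpy(val, &data, len);    /* hand back only len bytes */
}
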
PCH_PIC_POLARITY_END: - /* we only use defalut value 0: high level triggered */ - *(u32 *)val = 0; + data = s->polarity; + break; + case PCH_PIC_INT_IRR_START: + data = s->irr; + break; + case PCH_PIC_INT_ISR_START: + data = s->isr; break; default: ret = -EINVAL; } spin_unlock(&s->lock); + if (ret == 0) { + offset = (addr - s->pch_pic_base) & 7; + data = data >> (offset * 8); + memcpy(val, &data, len); + } + return ret; } @@ -195,6 +146,11 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, return -EINVAL; } + if (addr & (len - 1)) { + kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + /* statistics of pch pic reading */ vcpu->stat.pch_pic_read_exits++; ret = loongarch_pch_pic_read(s, addr, len, val); @@ -205,81 +161,69 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, int len, const void *val) { - int ret; - u32 old, data, offset, index; - u64 irq; + int ret = 0, offset; + u64 old, data, mask; + void *ptemp; + + switch (len) { + case 1: + data = *(u8 *)val; + mask = 0xFF; + break; + case 2: + data = *(u16 *)val; + mask = USHRT_MAX; + break; + case 4: + data = *(u32 *)val; + mask = UINT_MAX; + break; + case 8: + default: + data = *(u64 *)val; + mask = ULONG_MAX; + break; + } - ret = 0; - data = *(u32 *)val; - offset = addr - s->pch_pic_base; + offset = (addr - s->pch_pic_base) & 7; + mask = mask << (offset * 8); + data = data << (offset * 8); + offset = (addr - s->pch_pic_base) - offset; spin_lock(&s->lock); switch (offset) { - case PCH_PIC_MASK_START ... PCH_PIC_MASK_END: - offset -= PCH_PIC_MASK_START; - /* get whether high or low 32 bits we want to write */ - index = offset >> 2; - old = pch_pic_write_reg(&s->mask, index, data); - /* enable irq when mask value change to 0 */ - irq = (old & ~data) << (32 * index); - pch_pic_update_batch_irqs(s, irq, 1); - /* disable irq when mask value change to 1 */ - irq = (~old & data) << (32 * index); - pch_pic_update_batch_irqs(s, irq, 0); - break; - case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: - offset -= PCH_PIC_HTMSI_EN_START; - index = offset >> 2; - pch_pic_write_reg(&s->htmsi_en, index, data); + case PCH_PIC_MASK_START: + old = s->mask; + s->mask = (old & ~mask) | data; + if (old & ~data) + pch_pic_update_batch_irqs(s, old & ~data, 1); + if (~old & data) + pch_pic_update_batch_irqs(s, ~old & data, 0); break; - case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: - offset -= PCH_PIC_EDGE_START; - index = offset >> 2; - /* 1: edge triggered, 0: level triggered */ - pch_pic_write_reg(&s->edge, index, data); - break; - case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END: - offset -= PCH_PIC_CLEAR_START; - index = offset >> 2; - /* write 1 to clear edge irq */ - old = pch_pic_read_reg(&s->irr, index); - /* - * get the irq bitmap which is edge triggered and - * already set and to be cleared - */ - irq = old & pch_pic_read_reg(&s->edge, index) & data; - /* write irr to the new state where irqs have been cleared */ - pch_pic_write_reg(&s->irr, index, old & ~irq); - /* update cleared irqs */ - pch_pic_update_batch_irqs(s, irq, 0); + case PCH_PIC_HTMSI_EN_START: + s->htmsi_en = (s->htmsi_en & ~mask) | data; break; - case PCH_PIC_AUTO_CTRL0_START ... 
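
[Editor's note] The matching write path above builds a byte mask from the access length, shifts both mask and data into the correct lane of the containing u64, and updates the register read-modify-write as (old & ~mask) | data. The same arithmetic in plain C (reg_write is an illustrative name):

#include <stdint.h>

static uint64_t reg_write(uint64_t old, uint32_t offset, int len, uint64_t data)
{
        uint64_t mask = (len >= 8) ? ~0ULL : (1ULL << (len * 8)) - 1;
        int shift = (offset & 7) * 8;

        return (old & ~(mask << shift)) | ((data & mask) << shift);
}
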
PCH_PIC_AUTO_CTRL0_END: - offset -= PCH_PIC_AUTO_CTRL0_START; - index = offset >> 2; - /* we only use default mode: fixed interrupt distribution mode */ - pch_pic_write_reg(&s->auto_ctrl0, index, 0); + case PCH_PIC_EDGE_START: + s->edge = (s->edge & ~mask) | data; break; - case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: - offset -= PCH_PIC_AUTO_CTRL1_START; - index = offset >> 2; - /* we only use default mode: fixed interrupt distribution mode */ - pch_pic_write_reg(&s->auto_ctrl1, index, 0); + case PCH_PIC_POLARITY_START: + s->polarity = (s->polarity & ~mask) | data; break; - case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: - offset -= PCH_PIC_ROUTE_ENTRY_START; - /* only route to int0: eiointc */ - s->route_entry[offset] = 1; + case PCH_PIC_CLEAR_START: + old = s->irr & s->edge & data; + if (old) { + s->irr &= ~old; + pch_pic_update_batch_irqs(s, old, 0); + } break; case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: - /* route table to eiointc */ - offset -= PCH_PIC_HTMSI_VEC_START; - s->htmsi_vector[offset] = (u8)data; + ptemp = s->htmsi_vector + (offset - PCH_PIC_HTMSI_VEC_START); + *(u64 *)ptemp = (*(u64 *)ptemp & ~mask) | data; break; - case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END: - offset -= PCH_PIC_POLARITY_START; - index = offset >> 2; - /* we only use defalut value 0: high level triggered */ - pch_pic_write_reg(&s->polarity, index, 0); + /* Not implemented */ + case PCH_PIC_AUTO_CTRL0_START: + case PCH_PIC_AUTO_CTRL1_START: + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: break; default: ret = -EINVAL; @@ -302,6 +246,11 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu, return -EINVAL; } + if (addr & (len - 1)) { + kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); + return -EINVAL; + } + /* statistics of pch pic writing */ vcpu->stat.pch_pic_write_exits++; ret = loongarch_pch_pic_write(s, addr, len, val); @@ -338,6 +287,7 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, bool is_write) { + char buf[8]; int addr, offset, len = 8, ret = 0; void __user *data; void *p = NULL; @@ -387,17 +337,23 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev, return -EINVAL; } - spin_lock(&s->lock); - /* write or read value according to is_write */ if (is_write) { - if (copy_from_user(p, data, len)) - ret = -EFAULT; - } else { - if (copy_to_user(data, p, len)) - ret = -EFAULT; + if (copy_from_user(buf, data, len)) + return -EFAULT; } + + spin_lock(&s->lock); + if (is_write) + memcpy(p, buf, len); + else + memcpy(buf, p, len); spin_unlock(&s->lock); + if (!is_write) { + if (copy_to_user(data, buf, len)) + return -EFAULT; + } + return ret; } @@ -467,7 +423,7 @@ static int kvm_setup_default_irq_routing(struct kvm *kvm) static int kvm_pch_pic_create(struct kvm_device *dev, u32 type) { - int ret; + int i, ret, irq_num; struct kvm *kvm = dev->kvm; struct loongarch_pch_pic *s; @@ -483,6 +439,22 @@ static int kvm_pch_pic_create(struct kvm_device *dev, u32 type) if (!s) return -ENOMEM; + /* + * Interrupt controller identification register 1 + * Bit 24-31 Interrupt Controller ID + * Interrupt controller identification register 2 + * Bit 0-7 Interrupt Controller version number + * Bit 16-23 The number of interrupt sources supported + */ + irq_num = 32; + s->mask = -1UL; + s->id.desc.id = PCH_PIC_INT_ID_VAL; + s->id.desc.version = PCH_PIC_INT_ID_VER; + s->id.desc.irq_num = irq_num - 1; + for (i = 0; i < irq_num; i++) { + s->route_entry[i] = 1; + s->htmsi_vector[i] = i; + } 
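
[Editor's note] kvm_pch_pic_create() above now pre-populates the identification word exactly as its block comment describes: controller ID in bits 24-31 of the low half, version in bits 0-7 and the supported-source count minus one in bits 16-23 of the high half, with all 32 route entries pointing at int0 (eiointc) and HT-MSI vectors mapped 1:1. A hedged sketch of how such a descriptor could be packed into the u64 the read path returns (illustrative only; the kernel reads it back through s->id.data, whose struct/union layout is in a header not shown here):

#include <stdint.h>

static uint64_t pack_pch_pic_id(uint8_t id, uint8_t version, uint8_t irq_num)
{
        uint64_t lo = (uint64_t)id << 24;                        /* bits 24-31 */
        uint64_t hi = version | ((uint64_t)(irq_num - 1) << 16); /* bits 0-7, 16-23 */

        return lo | (hi << 32);
}
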
spin_lock_init(&s->lock); s->kvm = kvm; kvm->arch.pch_pic = s; diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index ed956c5cf2cc..7c8143e79c12 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ /* Track access to pages marked old */ new = kvm_pte_mkyoung(*ptep); if (write && !kvm_pte_dirty(new)) { - if (!kvm_pte_write(new)) { + if (!kvm_pte_writeable(new)) { ret = -EFAULT; goto out; } @@ -856,9 +856,9 @@ retry: prot_bits |= _CACHE_SUC; if (writeable) { - prot_bits |= _PAGE_WRITE; + prot_bits = kvm_pte_mkwriteable(prot_bits); if (write) - prot_bits |= __WRITEABLE; + prot_bits = kvm_pte_mkdirty(prot_bits); } /* Disable dirty logging on HugePages */ @@ -904,7 +904,7 @@ retry: kvm_release_faultin_page(kvm, page, false, writeable); spin_unlock(&kvm->mmu_lock); - if (prot_bits & _PAGE_DIRTY) + if (kvm_pte_dirty(prot_bits)) mark_page_dirty_in_slot(kvm, memslot, gfn); out: diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index 145514dab6d5..3467ee22b704 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -161,6 +161,41 @@ TRACE_EVENT(kvm_aux, __entry->pc) ); +#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0 +#define KVM_TRACE_IOCSR_READ 1 +#define KVM_TRACE_IOCSR_WRITE 2 + +#define kvm_trace_symbol_iocsr \ + { KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \ + { KVM_TRACE_IOCSR_READ, "read" }, \ + { KVM_TRACE_IOCSR_WRITE, "write" } + +TRACE_EVENT(kvm_iocsr, + TP_PROTO(int type, int len, u64 gpa, void *val), + TP_ARGS(type, len, gpa, val), + + TP_STRUCT__entry( + __field( u32, type ) + __field( u32, len ) + __field( u64, gpa ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->len = len; + __entry->gpa = gpa; + __entry->val = 0; + if (val) + memcpy(&__entry->val, val, + min_t(u32, sizeof(__entry->val), len)); + ), + + TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx", + __print_symbolic(__entry->type, kvm_trace_symbol_iocsr), + __entry->len, __entry->gpa, __entry->val) +); + TRACE_EVENT(kvm_vpid_change, TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid), TP_ARGS(vcpu, vpid), diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index d1b8c50941ca..9c802f7103c6 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -680,6 +680,8 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) *v |= CPUCFG2_ARMBT; if (cpu_has_lbt_mips) *v |= CPUCFG2_MIPSBT; + if (cpu_has_ptw) + *v |= CPUCFG2_PTW; return 0; case LOONGARCH_CPUCFG3: @@ -1283,9 +1285,11 @@ int kvm_own_lbt(struct kvm_vcpu *vcpu) return -EINVAL; preempt_disable(); - set_csr_euen(CSR_EUEN_LBTEN); - _restore_lbt(&vcpu->arch.lbt); - vcpu->arch.aux_inuse |= KVM_LARCH_LBT; + if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { + set_csr_euen(CSR_EUEN_LBTEN); + _restore_lbt(&vcpu->arch.lbt); + vcpu->arch.aux_inuse |= KVM_LARCH_LBT; + } preempt_enable(); return 0; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index edccfc8c9cd8..a49b1c1a3dd1 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -146,6 +146,10 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr if (kvm_pvtime_supported()) return 0; return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PTW: + if (cpu_has_ptw) + return 0; + return -ENXIO; default: return -ENXIO; } diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index deefd9617d00..2c93d33356e5 100644 --- a/arch/loongarch/mm/fault.c +++ 
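
[Editor's note] Among the hunks above, the kvm_own_lbt() fix makes LBT ownership idempotent: the EUEN enable and register restore now run only when the KVM_LARCH_LBT bit is not already set in aux_inuse, so a second call while the context is live is a no-op. A minimal model of the guard (the flag value and names are illustrative stand-ins):

#define KVM_LARCH_LBT (1u << 2)         /* illustrative bit value */

static unsigned int aux_inuse;          /* stands in for vcpu->arch.aux_inuse */

static void own_lbt(void)
{
        if (!(aux_inuse & KVM_LARCH_LBT)) {
                /* set_csr_euen(CSR_EUEN_LBTEN) + _restore_lbt() happen here,
                 * exactly once until ownership is dropped again */
                aux_inuse |= KVM_LARCH_LBT;
        }
}
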
b/arch/loongarch/mm/fault.c @@ -215,6 +215,58 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + + if (!(flags & FAULT_FLAG_USER)) + goto lock_mmap; + + vma = lock_vma_under_rcu(mm, address); + if (!vma) + goto lock_mmap; + + if (write) { + flags |= FAULT_FLAG_WRITE; + if (!(vma->vm_flags & VM_WRITE)) { + vma_end_read(vma); + si_code = SEGV_ACCERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area_nosemaphore; + } + } else { + if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) { + vma_end_read(vma); + si_code = SEGV_ACCERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area_nosemaphore; + } + if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) { + vma_end_read(vma); + si_code = SEGV_ACCERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area_nosemaphore; + } + } + + fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); + + if (!(fault & VM_FAULT_RETRY)) { + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto done; + } + + count_vm_vma_lock_event(VMA_LOCK_RETRY); + if (fault & VM_FAULT_MAJOR) + flags |= FAULT_FLAG_TRIED; + + /* Quick path to respond to signals */ + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + no_context(regs, write, address); + return; + } +lock_mmap: + retry: vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) @@ -276,8 +328,10 @@ good_area: */ goto retry; } + mmap_read_unlock(mm); + +done: if (unlikely(fault & VM_FAULT_ERROR)) { - mmap_read_unlock(mm); if (fault & VM_FAULT_OOM) { do_out_of_memory(regs, write, address); return; @@ -290,8 +344,6 @@ good_area: } BUG(); } - - mmap_read_unlock(mm); } asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index d2681272d8f0..170da98ad4f5 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -40,11 +40,9 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); #define __pte_none(early, pte) (early ? pte_none(pte) : \ ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page))) -bool kasan_early_stage = true; - void *kasan_mem_to_shadow(const void *addr) { - if (!kasan_arch_is_ready()) { + if (!kasan_enabled()) { return (void *)(kasan_early_shadow_page); } else { unsigned long maddr = (unsigned long)addr; @@ -298,7 +296,8 @@ void __init kasan_init(void) kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), kasan_mem_to_shadow((void *)KFENCE_AREA_END)); - kasan_early_stage = false; + /* Enable KASAN here before kasan_mem_to_shadow(). */ + kasan_init_generic(); /* Populate the linear mapping */ for_each_mem_range(i, &pa_start, &pa_end) { @@ -329,5 +328,4 @@ void __init kasan_init(void) /* At this point kasan is fully initialized. 
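
[Editor's note] The __do_page_fault() hunk above wires LoongArch into ARCH_SUPPORTS_PER_VMA_LOCK: lock_vma_under_rcu() tries to resolve the fault against a single VMA without taking mmap_lock, and only a VM_FAULT_RETRY result falls through to the existing lock_mm_and_find_vma() slow path. A loose userspace analogue of that optimistic fast-path-with-fallback shape, using pthread rwlocks as stand-ins (the kernel APIs themselves cannot be reproduced here):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t vma_lock = PTHREAD_RWLOCK_INITIALIZER;  /* per-VMA lock */
static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER; /* whole-mm lock */

/* Stand-in for handle_mm_fault() completing without VM_FAULT_RETRY. */
static bool fast_fault(void)
{
        return true;
}

static void handle_fault(void)
{
        if (pthread_rwlock_tryrdlock(&vma_lock) == 0) {
                bool done = fast_fault();

                pthread_rwlock_unlock(&vma_lock);
                if (done)
                        return;         /* fast path: mmap_lock never taken */
        }

        pthread_rwlock_rdlock(&mmap_lock);      /* slow path, as before */
        /* lock_mm_and_find_vma() + the existing retry loop live here */
        pthread_rwlock_unlock(&mmap_lock);
}
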
Enable error messages */ init_task.kasan_depth = 0; - pr_info("KernelAddressSanitizer initialized.\n"); } diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index abfdb6bb5c38..cbe53d0b7fb0 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -527,13 +527,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext emit_zext_32(ctx, dst, is32); break; case 8: - move_reg(ctx, t1, src); - emit_insn(ctx, extwb, dst, t1); + emit_insn(ctx, extwb, dst, src); emit_zext_32(ctx, dst, is32); break; case 16: - move_reg(ctx, t1, src); - emit_insn(ctx, extwh, dst, t1); + emit_insn(ctx, extwh, dst, src); emit_zext_32(ctx, dst, is32); break; case 32: @@ -1294,8 +1292,10 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; - if (!is_kernel_text((unsigned long)ip) && - !is_bpf_text_address((unsigned long)ip)) + /* Only poking bpf text is supported. Since kernel function entry + * is set up by ftrace, we rely on ftrace to poke kernel functions. + */ + if (!is_bpf_text_address((unsigned long)ip)) return -ENOTSUPP; ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call); @@ -1448,12 +1448,43 @@ void arch_free_bpf_trampoline(void *image, unsigned int size) bpf_prog_pack_free(image, size); } +/* + * Sign-extend the register if necessary + */ +static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign) +{ + /* ABI requires unsigned char/short to be zero-extended */ + if (!sign && (size == 1 || size == 2)) { + if (rd != rj) + move_reg(ctx, rd, rj); + return; + } + + switch (size) { + case 1: + emit_insn(ctx, extwb, rd, rj); + break; + case 2: + emit_insn(ctx, extwh, rd, rj); + break; + case 4: + emit_insn(ctx, addiw, rd, rj, 0); + break; + case 8: + if (rd != rj) + move_reg(ctx, rd, rj); + break; + default: + pr_warn("bpf_jit: invalid size %d for sign_extend\n", size); + } +} + static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, const struct btf_func_model *m, struct bpf_tramp_links *tlinks, void *func_addr, u32 flags) { int i, ret, save_ret; - int stack_size = 0, nargs = 0; + int stack_size, nargs; int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off; bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; void *orig_call = func_addr; @@ -1462,9 +1493,6 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; u32 **branches = NULL; - if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) - return -ENOTSUPP; - /* * FP + 8 [ RA to parent func ] return address to parent * function @@ -1495,20 +1523,23 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i if (m->nr_args > LOONGARCH_MAX_REG_ARGS) return -ENOTSUPP; + /* FIXME: No support of struct argument */ + for (i = 0; i < m->nr_args; i++) { + if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) + return -ENOTSUPP; + } + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) return -ENOTSUPP; - stack_size = 0; - /* Room of trampoline frame to store return address and frame pointer */ - stack_size += 16; + stack_size = 16; save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); - if (save_ret) { - /* Save BPF R0 and A0 */ - stack_size += 16; - retval_off = stack_size; - } + if (save_ret) + stack_size += 
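
[Editor's note] The sign_extend() helper added in the bpf_jit.c hunk above encodes the LoongArch calling convention for BPF struct_ops return values: unsigned 8/16-bit results are zero-extended, while narrower signed values and 32-bit values must be sign-extended into the 64-bit register (ext.w.b, ext.w.h, or addi.w with immediate 0). The same rules expressed in plain C:

#include <stdint.h>

static int64_t extend(int64_t v, int size, int sign)
{
        if (!sign && (size == 1 || size == 2))          /* ABI: zero-extend */
                return v & (((int64_t)1 << (size * 8)) - 1);

        switch (size) {
        case 1: return (int8_t)v;       /* ext.w.b          */
        case 2: return (int16_t)v;      /* ext.w.h          */
        case 4: return (int32_t)v;      /* addi.w rd, rj, 0 */
        default: return v;              /* already 64-bit   */
        }
}
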
16; /* Save BPF R0 and A0 */ + + retval_off = stack_size; /* Room of trampoline frame to store args */ nargs = m->nr_args; @@ -1595,7 +1626,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i orig_call += LOONGARCH_BPF_FENTRY_NBYTES; if (flags & BPF_TRAMP_F_CALL_ORIG) { - move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im); ret = emit_call(ctx, (const u64)__bpf_tramp_enter); if (ret) return ret; @@ -1645,7 +1676,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->ro_image + ctx->idx; - move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im); ret = emit_call(ctx, (const u64)__bpf_tramp_exit); if (ret) goto out; @@ -1655,8 +1686,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i restore_args(ctx, m->nr_args, args_off); if (save_ret) { - emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); + if (is_struct_ops) + sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0], + m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG); + else + emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); } emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); @@ -1715,7 +1750,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags); - if (ret > 0 && validate_code(&ctx) < 0) { + if (ret < 0) + goto out; + + if (validate_code(&ctx) < 0) { ret = -EINVAL; goto out; } @@ -1726,7 +1764,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, goto out; } - bpf_flush_icache(ro_image, ro_image_end); out: kvfree(image); return ret < 0 ? ret : size; @@ -1744,8 +1781,7 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags); - /* Page align */ - return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE); + return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE; } struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) |
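
[Editor's note] Finally, arch_bpf_trampoline_size() above now returns the exact byte size (instruction count times LOONGARCH_INSN_SIZE) instead of rounding up to a page, matching the bpf_prog_pack allocator used by arch_free_bpf_trampoline(), which packs multiple images into one page. The underlying dry-run pattern, sketched (struct jit_ctx here is a simplified stand-in, not the kernel's):

#include <stdint.h>

struct jit_ctx {
        uint32_t *image;        /* NULL during the sizing pass */
        int idx;
};

static void emit(struct jit_ctx *ctx, uint32_t insn)
{
        if (ctx->image)                 /* pass 2: really write */
                ctx->image[ctx->idx] = insn;
        ctx->idx++;                     /* pass 1: just count   */
}

static int trampoline_size(void (*build)(struct jit_ctx *))
{
        struct jit_ctx ctx = { 0 };

        build(&ctx);
        return ctx.idx * 4;     /* insns * LOONGARCH_INSN_SIZE, no page rounding */
}

Running the same emitter once with image == NULL guarantees the sizing pass and the real pass can never disagree, which is why the diff can drop the round_up() without risking an undersized allocation.
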