Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/configs/am200epdkit_defconfig | 1
-rw-r--r--  arch/arm/configs/lpc32xx_defconfig | 1
-rw-r--r--  arch/arm/configs/omap1_defconfig | 1
-rw-r--r--  arch/arm/configs/tegra_defconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 75
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c | 44
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sm4-ce-glue.c | 42
-rw-r--r--  arch/loongarch/Kconfig | 5
-rw-r--r--  arch/loongarch/Makefile | 7
-rw-r--r--  arch/loongarch/configs/loongson32_defconfig | 1105
-rw-r--r--  arch/loongarch/configs/loongson64_defconfig (renamed from arch/loongarch/configs/loongson3_defconfig) | 7
-rw-r--r--  arch/loongarch/include/asm/Kbuild | 1
-rw-r--r--  arch/loongarch/include/asm/addrspace.h | 15
-rw-r--r--  arch/loongarch/include/asm/asm.h | 77
-rw-r--r--  arch/loongarch/include/asm/asmmacro.h | 118
-rw-r--r--  arch/loongarch/include/asm/atomic-amo.h | 206
-rw-r--r--  arch/loongarch/include/asm/atomic-llsc.h | 100
-rw-r--r--  arch/loongarch/include/asm/atomic.h | 197
-rw-r--r--  arch/loongarch/include/asm/bitops.h | 11
-rw-r--r--  arch/loongarch/include/asm/bitrev.h | 2
-rw-r--r--  arch/loongarch/include/asm/checksum.h | 4
-rw-r--r--  arch/loongarch/include/asm/cmpxchg.h | 48
-rw-r--r--  arch/loongarch/include/asm/cpu-features.h | 3
-rw-r--r--  arch/loongarch/include/asm/dmi.h | 2
-rw-r--r--  arch/loongarch/include/asm/elf.h | 31
-rw-r--r--  arch/loongarch/include/asm/inst.h | 12
-rw-r--r--  arch/loongarch/include/asm/irq.h | 12
-rw-r--r--  arch/loongarch/include/asm/jump_label.h | 12
-rw-r--r--  arch/loongarch/include/asm/local.h | 37
-rw-r--r--  arch/loongarch/include/asm/loongarch.h | 102
-rw-r--r--  arch/loongarch/include/asm/module.h | 11
-rw-r--r--  arch/loongarch/include/asm/page.h | 2
-rw-r--r--  arch/loongarch/include/asm/percpu.h | 44
-rw-r--r--  arch/loongarch/include/asm/pgtable-bits.h | 36
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 79
-rw-r--r--  arch/loongarch/include/asm/stackframe.h | 34
-rw-r--r--  arch/loongarch/include/asm/string.h | 2
-rw-r--r--  arch/loongarch/include/asm/timex.h | 33
-rw-r--r--  arch/loongarch/include/asm/uaccess.h | 63
-rw-r--r--  arch/loongarch/include/asm/vdso/gettimeofday.h | 4
-rw-r--r--  arch/loongarch/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/loongarch/include/uapi/asm/ptrace.h | 10
-rw-r--r--  arch/loongarch/include/uapi/asm/unistd.h | 6
-rw-r--r--  arch/loongarch/kernel/Makefile.syscalls | 1
-rw-r--r--  arch/loongarch/kernel/cpu-probe.c | 13
-rw-r--r--  arch/loongarch/kernel/efi-header.S | 4
-rw-r--r--  arch/loongarch/kernel/efi.c | 4
-rw-r--r--  arch/loongarch/kernel/entry.S | 22
-rw-r--r--  arch/loongarch/kernel/env.c | 5
-rw-r--r--  arch/loongarch/kernel/fpu.S | 111
-rw-r--r--  arch/loongarch/kernel/head.S | 39
-rw-r--r--  arch/loongarch/kernel/module-sections.c | 1
-rw-r--r--  arch/loongarch/kernel/module.c | 204
-rw-r--r--  arch/loongarch/kernel/proc.c | 10
-rw-r--r--  arch/loongarch/kernel/process.c | 11
-rw-r--r--  arch/loongarch/kernel/ptrace.c | 5
-rw-r--r--  arch/loongarch/kernel/relocate.c | 13
-rw-r--r--  arch/loongarch/kernel/setup.c | 8
-rw-r--r--  arch/loongarch/kernel/switch.S | 28
-rw-r--r--  arch/loongarch/kernel/syscall.c | 15
-rw-r--r--  arch/loongarch/kernel/time.c | 31
-rw-r--r--  arch/loongarch/kernel/traps.c | 15
-rw-r--r--  arch/loongarch/kernel/unaligned.c | 30
-rw-r--r--  arch/loongarch/kvm/vcpu.c | 5
-rw-r--r--  arch/loongarch/lib/bswapdi.c | 13
-rw-r--r--  arch/loongarch/lib/bswapsi.c | 13
-rw-r--r--  arch/loongarch/lib/clear_user.S | 22
-rw-r--r--  arch/loongarch/lib/copy_user.S | 28
-rw-r--r--  arch/loongarch/lib/dump_tlb.c | 14
-rw-r--r--  arch/loongarch/lib/unaligned.S | 72
-rw-r--r--  arch/loongarch/mm/init.c | 4
-rw-r--r--  arch/loongarch/mm/page.S | 118
-rw-r--r--  arch/loongarch/mm/tlb.c | 12
-rw-r--r--  arch/loongarch/mm/tlbex.S | 322
-rw-r--r--  arch/loongarch/pci/pci.c | 2
-rw-r--r--  arch/loongarch/power/hibernate.c | 6
-rw-r--r--  arch/loongarch/power/platform.c | 4
-rw-r--r--  arch/loongarch/power/suspend.c | 24
-rw-r--r--  arch/loongarch/power/suspend_asm.S | 72
-rw-r--r--  arch/loongarch/vdso/Makefile | 7
-rw-r--r--  arch/loongarch/vdso/vdso.lds.S | 4
-rw-r--r--  arch/loongarch/vdso/vgetcpu.c | 8
-rw-r--r--  arch/mips/configs/gcw0_defconfig | 1
-rw-r--r--  arch/mips/configs/loongson1_defconfig | 1
-rw-r--r--  arch/mips/configs/qi_lb60_defconfig | 1
-rw-r--r--  arch/mips/configs/rbtx49xx_defconfig | 1
-rw-r--r--  arch/mips/configs/rs90_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx-hw.config | 1
-rw-r--r--  arch/powerpc/configs/86xx-hw.config | 1
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 3
-rw-r--r--  arch/riscv/crypto/Kconfig | 12
-rw-r--r--  arch/sh/configs/edosk7760_defconfig | 1
-rw-r--r--  arch/sh/configs/se7724_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7785lcr_32bit_defconfig | 1
-rw-r--r--  arch/x86/events/amd/uncore.c | 5
-rw-r--r--  arch/x86/events/intel/core.c | 3
-rw-r--r--  arch/x86/hyperv/.gitignore | 1
100 files changed, 3087 insertions, 875 deletions
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 134a559aba3d..2367b1685c1c 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -68,7 +68,6 @@ CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_PXA2XX_AC97=m
CONFIG_USB_GADGET=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 2bddb0924a8c..b9e2e603cd95 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -113,7 +113,6 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_DEBUG=y
CONFIG_SND_DEBUG_VERBOSE=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index dee820474f44..df88763fc7c3 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -148,7 +148,6 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_DUMMY=y
CONFIG_SND_USB_AUDIO=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index ce70ff07c978..68aedaf92667 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -219,7 +219,6 @@ CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_HDA_TEGRA=y
CONFIG_SND_HDA_INPUT_BEEP=y
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index b087b900d279..c51d4487e9e9 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -549,38 +549,37 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
tail = 0;
}
- for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
- int nbytes = walk.nbytes;
+ scoped_ksimd() {
+ for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+ int nbytes = walk.nbytes;
- if (walk.nbytes < walk.total)
- nbytes &= ~(AES_BLOCK_SIZE - 1);
+ if (walk.nbytes < walk.total)
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
- scoped_ksimd()
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key1.key_enc, rounds, nbytes,
ctx->key2.key_enc, walk.iv, first);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
- if (err || likely(!tail))
- return err;
+ if (err || likely(!tail))
+ return err;
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
- err = skcipher_walk_virt(&walk, &subreq, false);
- if (err)
- return err;
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
- scoped_ksimd()
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key1.key_enc, rounds, walk.nbytes,
ctx->key2.key_enc, walk.iv, first);
-
+ }
return skcipher_walk_done(&walk, 0);
}
@@ -619,39 +618,37 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
tail = 0;
}
- for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
- int nbytes = walk.nbytes;
+ scoped_ksimd() {
+ for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+ int nbytes = walk.nbytes;
- if (walk.nbytes < walk.total)
- nbytes &= ~(AES_BLOCK_SIZE - 1);
+ if (walk.nbytes < walk.total)
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
- scoped_ksimd()
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key1.key_dec, rounds, nbytes,
ctx->key2.key_enc, walk.iv, first);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
- if (err || likely(!tail))
- return err;
-
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ if (err || likely(!tail))
+ return err;
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
- err = skcipher_walk_virt(&walk, &subreq, false);
- if (err)
- return err;
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
- scoped_ksimd()
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key1.key_dec, rounds, walk.nbytes,
ctx->key2.key_enc, walk.iv, first);
-
+ }
return skcipher_walk_done(&walk, 0);
}
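
The same scoped_ksimd() restructuring recurs in aes-neonbs-glue.c and sm4-ce-glue.c below. In outline, the change hoists the SIMD section out of the per-chunk loop so the kernel SIMD context is entered once per request rather than once per cipher call. A simplified sketch of the shape of the change, assuming scoped_ksimd() brackets kernel_neon_begin()/kernel_neon_end(); do_simd_cipher() and rem are illustrative stand-ins, not the driver's names:

	/* Before: one SIMD begin/end pair per processed chunk. */
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		scoped_ksimd()
			do_simd_cipher(&walk);		/* NEON work */
		err = skcipher_walk_done(&walk, rem);
	}

	/* After: a single SIMD section spans the whole walk, and the
	 * ciphertext-stealing tail is handled inside the same scope. */
	scoped_ksimd() {
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			do_simd_cipher(&walk);
			err = skcipher_walk_done(&walk, rem);
		}
		do_simd_cipher(&walk);			/* tail block(s) */
	}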
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index d496effb0a5b..cb87c8fc66b3 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -312,13 +312,13 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
- out = walk.dst.virt.addr;
- in = walk.src.virt.addr;
- nbytes = walk.nbytes;
+ scoped_ksimd() {
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
+ out = walk.dst.virt.addr;
+ in = walk.src.virt.addr;
+ nbytes = walk.nbytes;
- scoped_ksimd() {
if (blocks >= 8) {
if (first == 1)
neon_aes_ecb_encrypt(walk.iv, walk.iv,
@@ -344,30 +344,28 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
ctx->twkey, walk.iv, first);
nbytes = first = 0;
}
+ err = skcipher_walk_done(&walk, nbytes);
}
- err = skcipher_walk_done(&walk, nbytes);
- }
- if (err || likely(!tail))
- return err;
+ if (err || likely(!tail))
+ return err;
- /* handle ciphertext stealing */
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ /* handle ciphertext stealing */
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
- err = skcipher_walk_virt(&walk, req, false);
- if (err)
- return err;
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
- out = walk.dst.virt.addr;
- in = walk.src.virt.addr;
- nbytes = walk.nbytes;
+ out = walk.dst.virt.addr;
+ in = walk.src.virt.addr;
+ nbytes = walk.nbytes;
- scoped_ksimd() {
if (encrypt)
neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
ctx->key.rounds, nbytes, ctx->twkey,
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 7951557a285a..ef249d06c92c 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -133,7 +133,7 @@ static int ghash_finup(struct shash_desc *desc, const u8 *src,
u8 buf[GHASH_BLOCK_SIZE] = {};
memcpy(buf, src, len);
- ghash_do_simd_update(1, ctx->digest, src, key, NULL,
+ ghash_do_simd_update(1, ctx->digest, buf, key, NULL,
pmull_ghash_update_p8);
memzero_explicit(buf, sizeof(buf));
}
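
The ghash-ce change is a one-line bug fix rather than a cleanup: src holds only len bytes of the final partial block, while the update helper consumes a full GHASH_BLOCK_SIZE block, so the zero-padded copy buf is what must be processed. A standalone C model of the corrected pattern (process_block() is a hypothetical stand-in for the real PMULL update helper):

#include <string.h>

#define GHASH_BLOCK_SIZE 16

static void process_block(const unsigned char *blk)
{
	(void)blk;	/* stand-in for the PMULL block update */
}

static void finup_tail(const unsigned char *src, size_t len)
{
	unsigned char buf[GHASH_BLOCK_SIZE] = { 0 };	/* zero padding */

	memcpy(buf, src, len);	/* only len < 16 bytes of src are valid */
	process_block(buf);	/* the fix: read the padded buf, not src */
	memset(buf, 0, sizeof(buf));	/* scrub, like memzero_explicit() */
}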
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 5569cece5a0b..0eeabfa9ef25 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -346,11 +346,11 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
tail = 0;
}
- while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
- if (nbytes < walk.total)
- nbytes &= ~(SM4_BLOCK_SIZE - 1);
+ scoped_ksimd() {
+ while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
+ if (nbytes < walk.total)
+ nbytes &= ~(SM4_BLOCK_SIZE - 1);
- scoped_ksimd() {
if (encrypt)
sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, nbytes,
@@ -359,32 +359,30 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, nbytes,
rkey2_enc);
- }
- rkey2_enc = NULL;
+ rkey2_enc = NULL;
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- if (err)
- return err;
- }
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ if (err)
+ return err;
+ }
- if (likely(tail == 0))
- return 0;
+ if (likely(tail == 0))
+ return 0;
- /* handle ciphertext stealing */
+ /* handle ciphertext stealing */
- dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
+ dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
- skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
- req->iv);
+ skcipher_request_set_crypt(&subreq, src, dst,
+ SM4_BLOCK_SIZE + tail, req->iv);
- err = skcipher_walk_virt(&walk, &subreq, false);
- if (err)
- return err;
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
- scoped_ksimd() {
if (encrypt)
sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, walk.nbytes,
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 5b1116733d88..730f34214519 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -115,6 +115,7 @@ config LOONGARCH
select GPIOLIB
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
@@ -567,6 +568,10 @@ config ARCH_STRICT_ALIGN
to run kernel only on systems with h/w unaligned access support in
order to optimise for performance.
+config CPU_HAS_AMO
+ bool
+ default 64BIT
+
config CPU_HAS_FPU
bool
default y
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 96ca1a688984..8d45b860fe56 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -5,7 +5,12 @@
boot := arch/loongarch/boot
-KBUILD_DEFCONFIG := loongson3_defconfig
+ifeq ($(shell uname -m),loongarch32)
+KBUILD_DEFCONFIG := loongson32_defconfig
+else
+KBUILD_DEFCONFIG := loongson64_defconfig
+endif
+
KBUILD_DTBS := dtbs
image-name-y := vmlinux
diff --git a/arch/loongarch/configs/loongson32_defconfig b/arch/loongarch/configs/loongson32_defconfig
new file mode 100644
index 000000000000..276b1577e0be
--- /dev/null
+++ b/arch/loongarch/configs/loongson32_defconfig
@@ -0,0 +1,1105 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_ZSTD=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_DYNAMIC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_KEXEC=y
+CONFIG_LOONGARCH=y
+CONFIG_32BIT=y
+CONFIG_32BIT_STANDARD=y
+CONFIG_MACH_LOONGSON32=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_250=y
+CONFIG_DMI=y
+CONFIG_EFI=y
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_TAD=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_BGRT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_COMPRESS=y
+CONFIG_MODULE_COMPRESS_ZSTD=y
+CONFIG_MODULE_DECOMPRESS=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_FC_APPID=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_ZSWAP=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZSMALLOC=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
+CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_CMA=y
+CONFIG_CMA_SYSFS=y
+CONFIG_USERFAULTFD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_ESPINTCP=y
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_MPTCP=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_CT=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_IP_SCTP=m
+CONFIG_RDS=y
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC2=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HS=y
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_MTK=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_AG6XX=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_ATH3K=m
+CONFIG_BT_VIRTIO=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_CEPH_LIB=m
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+CONFIG_RAPIDIO_ENUM_BASIC=m
+CONFIG_RAPIDIO_CHMAN=m
+CONFIG_RAPIDIO_MPORT_CDEV=m
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_EFI_ZBOOT=y
+CONFIG_EFI_BOOTLOADER_CONTROL=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_NAND_LOONGSON=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_LZ4HC=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_DEFLATE=y
+CONFIG_ZRAM_BACKEND_842=y
+CONFIG_ZRAM_BACKEND_LZO=y
+CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_MULTI_COMP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_DRBD=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+CONFIG_NVME_TARGET=m
+CONFIG_NVME_TARGET_PASSTHRU=y
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_TCP=m
+CONFIG_EEPROM_AT24=m
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_MMIO=m
+CONFIG_PVPANIC_PCI=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=y
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DWC=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LLBITMAP=y
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BCACHE=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
+CONFIG_DM_INIT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_DM_VDO=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=m
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=y
+CONFIG_RIONET=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_BNX2=y
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGBE=y
+CONFIG_I40E=y
+CONFIG_ICE=y
+CONFIG_FM10K=y
+CONFIG_IGC=y
+CONFIG_IDPF=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=y
+CONFIG_TXGBE=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_MOTORCOMM_PHY=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_RNDIS_HOST=m
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH9K=m
+CONFIG_ATH9K_HTC=m
+CONFIG_IWLWIFI=m
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_MT7601U=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTL8192DU=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8XXXU=m
+CONFIG_RTW88=m
+CONFIG_RTW88_8822BE=m
+CONFIG_RTW88_8822BU=m
+CONFIG_RTW88_8822CE=m
+CONFIG_RTW88_8822CU=m
+CONFIG_RTW88_8723DE=m
+CONFIG_RTW88_8723DU=m
+CONFIG_RTW88_8821CE=m
+CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
+CONFIG_RTW89=m
+CONFIG_RTW89_8851BE=m
+CONFIG_RTW89_8852AE=m
+CONFIG_RTW89_8852BE=m
+CONFIG_RTW89_8852BTE=m
+CONFIG_RTW89_8852CE=m
+CONFIG_RTW89_8922AE=m
+CONFIG_ZD1211RW=m
+CONFIG_USB4_NET=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_LOONGSON=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_PRINTER=m
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PCI=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_LOONGSON_PCI=m
+CONFIG_SPI_LOONGSON_PLATFORM=m
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_LOONGSON1=y
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCA9570=m
+CONFIG_GPIO_PCF857X=m
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_WATCHDOG=y
+CONFIG_LOONGSON1_WDT=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_EFIDRM=y
+CONFIG_DRM_SIMPLEDRM=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_LOONGSON=y
+CONFIG_FB=y
+CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=y
+CONFIG_SND_HDA_CODEC_HDMI_ATI=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_AUDIO_MIDI_V2=y
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_LOONGSON1_AC97=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_ES8311=m
+CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8323=m
+CONFIG_SND_SOC_ES8326=m
+CONFIG_SND_SOC_ES8328_I2C=m
+CONFIG_SND_SOC_ES8328_SPI=m
+CONFIG_SND_SOC_UDA1334=m
+CONFIG_SND_SOC_UDA1342=m
+CONFIG_SND_VIRTIO=m
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_ELAN=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_WACOM=m
+CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_USB_UAS=m
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_GADGET=y
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_UCSI=m
+CONFIG_UCSI_ACPI=m
+CONFIG_MMC=y
+CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_LOONGSON=y
+CONFIG_DMADEVICES=y
+CONFIG_LOONGSON1_APB_DMA=y
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_COMEDI=m
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_STAGING=y
+CONFIG_CLKSRC_LOONGSON1_PWM=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_NTB=m
+CONFIG_NTB_MSI=y
+CONFIG_NTB_IDT=m
+CONFIG_NTB_EPF=m
+CONFIG_NTB_SWITCHTEC=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
+CONFIG_USB4=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS=m
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_CHECK_FS=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_EXFAT_FS=m
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_LZX_XPRESS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_ORANGEFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_PSTORE=m
+CONFIG_PSTORE_COMPRESS=y
+CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ZIP_ZSTD=y
+CONFIG_EROFS_FS_ONDEMAND=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_FS_SECURITY_LABEL=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_9P_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_DLM=m
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_SELFTESTS=y
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_SM3_GENERIC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_DEV_LOONGSON_RNG=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson64_defconfig
index 50e1304e7a6f..a14db1a95e7e 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson64_defconfig
@@ -435,7 +435,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
-CONFIG_SYSFB_SIMPLEFB=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
@@ -530,6 +529,7 @@ CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LLBITMAP=y
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
@@ -738,6 +738,7 @@ CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_LOONGSON=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_PRINTER=m
@@ -801,6 +802,8 @@ CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_EFIDRM=y
+CONFIG_DRM_SIMPLEDRM=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -811,9 +814,7 @@ CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
-CONFIG_DRM_SIMPLEDRM=y
CONFIG_FB=y
-CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index b04d2cef935f..9034b583a88a 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
syscall-y += syscall_table_64.h
generated-y += orc_hash.h
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index e739dbc6329d..d6472cafb32c 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -38,11 +38,20 @@ extern unsigned long vm_map_base;
#endif
#ifndef WRITECOMBINE_BASE
+#ifdef CONFIG_32BIT
+#define WRITECOMBINE_BASE CSR_DMW0_BASE
+#else
#define WRITECOMBINE_BASE CSR_DMW2_BASE
#endif
+#endif
+#ifdef CONFIG_32BIT
+#define DMW_PABITS 29
+#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1)
+#else
#define DMW_PABITS 48
-#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
+#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1)
+#endif
/*
* Memory above this physical address will be considered highmem.
@@ -112,7 +121,11 @@ extern unsigned long vm_map_base;
/*
* Returns the physical address of a KPRANGEx / XKPRANGE address
*/
+#ifdef CONFIG_32BIT
+#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK)
+#else
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
+#endif
/*
* On LoongArch, I/O ports mapping is as follows:
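
To make the new 32-bit constants concrete: DMW_PABITS = 29 gives TO_PHYS_MASK = 0x1fffffff, so PHYSADDR() keeps the low 29 bits of a direct-mapped-window address. A small userspace check of the arithmetic (the example address is illustrative; which addresses actually fall in a DMW window depends on CSR programming):

#include <stdio.h>

#define DMW_PABITS	29
#define TO_PHYS_MASK	((1UL << DMW_PABITS) - 1)	/* 0x1fffffff */
#define PHYSADDR(a)	((unsigned long)(a) & TO_PHYS_MASK)

int main(void)
{
	/* 0xa0001000 with the top three window bits stripped -> 0x1000 */
	printf("0x%08lx\n", PHYSADDR(0xa0001000u));
	return 0;
}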
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
index f018d26fc995..719cab1a0ad8 100644
--- a/arch/loongarch/include/asm/asm.h
+++ b/arch/loongarch/include/asm/asm.h
@@ -72,11 +72,11 @@
#define INT_SUB sub.w
#define INT_L ld.w
#define INT_S st.w
-#define INT_SLL slli.w
+#define INT_SLLI slli.w
#define INT_SLLV sll.w
-#define INT_SRL srli.w
+#define INT_SRLI srli.w
#define INT_SRLV srl.w
-#define INT_SRA srai.w
+#define INT_SRAI srai.w
#define INT_SRAV sra.w
#endif
@@ -86,11 +86,11 @@
#define INT_SUB sub.d
#define INT_L ld.d
#define INT_S st.d
-#define INT_SLL slli.d
+#define INT_SLLI slli.d
#define INT_SLLV sll.d
-#define INT_SRL srli.d
+#define INT_SRLI srli.d
#define INT_SRLV srl.d
-#define INT_SRA srai.d
+#define INT_SRAI srai.d
#define INT_SRAV sra.d
#endif
@@ -100,15 +100,23 @@
#if (__SIZEOF_LONG__ == 4)
#define LONG_ADD add.w
#define LONG_ADDI addi.w
+#define LONG_ALSL alsl.w
+#define LONG_BSTRINS bstrins.w
+#define LONG_BSTRPICK bstrpick.w
#define LONG_SUB sub.w
#define LONG_L ld.w
+#define LONG_LI li.w
+#define LONG_LPTR ld.w
#define LONG_S st.w
-#define LONG_SLL slli.w
+#define LONG_SPTR st.w
+#define LONG_SLLI slli.w
#define LONG_SLLV sll.w
-#define LONG_SRL srli.w
+#define LONG_SRLI srli.w
#define LONG_SRLV srl.w
-#define LONG_SRA srai.w
+#define LONG_SRAI srai.w
#define LONG_SRAV sra.w
+#define LONG_ROTR rotr.w
+#define LONG_ROTRI rotri.w
#ifdef __ASSEMBLER__
#define LONG .word
@@ -121,15 +129,23 @@
#if (__SIZEOF_LONG__ == 8)
#define LONG_ADD add.d
#define LONG_ADDI addi.d
+#define LONG_ALSL alsl.d
+#define LONG_BSTRINS bstrins.d
+#define LONG_BSTRPICK bstrpick.d
#define LONG_SUB sub.d
#define LONG_L ld.d
+#define LONG_LI li.d
+#define LONG_LPTR ldptr.d
#define LONG_S st.d
-#define LONG_SLL slli.d
+#define LONG_SPTR stptr.d
+#define LONG_SLLI slli.d
#define LONG_SLLV sll.d
-#define LONG_SRL srli.d
+#define LONG_SRLI srli.d
#define LONG_SRLV srl.d
-#define LONG_SRA srai.d
+#define LONG_SRAI srai.d
#define LONG_SRAV sra.d
+#define LONG_ROTR rotr.d
+#define LONG_ROTRI rotri.d
#ifdef __ASSEMBLER__
#define LONG .dword
@@ -145,16 +161,23 @@
#if (__SIZEOF_POINTER__ == 4)
#define PTR_ADD add.w
#define PTR_ADDI addi.w
+#define PTR_ALSL alsl.w
+#define PTR_BSTRINS bstrins.w
+#define PTR_BSTRPICK bstrpick.w
#define PTR_SUB sub.w
#define PTR_L ld.w
-#define PTR_S st.w
#define PTR_LI li.w
-#define PTR_SLL slli.w
+#define PTR_LPTR ld.w
+#define PTR_S st.w
+#define PTR_SPTR st.w
+#define PTR_SLLI slli.w
#define PTR_SLLV sll.w
-#define PTR_SRL srli.w
+#define PTR_SRLI srli.w
#define PTR_SRLV srl.w
-#define PTR_SRA srai.w
+#define PTR_SRAI srai.w
#define PTR_SRAV sra.w
+#define PTR_ROTR rotr.w
+#define PTR_ROTRI rotri.w
#define PTR_SCALESHIFT 2
@@ -168,16 +191,23 @@
#if (__SIZEOF_POINTER__ == 8)
#define PTR_ADD add.d
#define PTR_ADDI addi.d
+#define PTR_ALSL alsl.d
+#define PTR_BSTRINS bstrins.d
+#define PTR_BSTRPICK bstrpick.d
#define PTR_SUB sub.d
#define PTR_L ld.d
-#define PTR_S st.d
#define PTR_LI li.d
-#define PTR_SLL slli.d
+#define PTR_LPTR ldptr.d
+#define PTR_S st.d
+#define PTR_SPTR stptr.d
+#define PTR_SLLI slli.d
#define PTR_SLLV sll.d
-#define PTR_SRL srli.d
+#define PTR_SRLI srli.d
#define PTR_SRLV srl.d
-#define PTR_SRA srai.d
+#define PTR_SRAI srai.d
#define PTR_SRAV sra.d
+#define PTR_ROTR rotr.d
+#define PTR_ROTRI rotri.d
#define PTR_SCALESHIFT 3
@@ -190,10 +220,17 @@
/* Annotate a function as being unsuitable for kprobes. */
#ifdef CONFIG_KPROBES
+#ifdef CONFIG_32BIT
+#define _ASM_NOKPROBE(name) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ .long name; \
+ .popsection
+#else
#define _ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
.quad name; \
.popsection
+#endif
#else
#define _ASM_NOKPROBE(name)
#endif
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 8d7f501b0a12..a648be5f723f 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -5,43 +5,55 @@
#ifndef _ASM_ASMMACRO_H
#define _ASM_ASMMACRO_H
+#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
+#ifdef CONFIG_64BIT
+#define TASK_STRUCT_OFFSET 0
+#else
+#define TASK_STRUCT_OFFSET 2000
+#endif
+
.macro cpu_save_nonscratch thread
- stptr.d s0, \thread, THREAD_REG23
- stptr.d s1, \thread, THREAD_REG24
- stptr.d s2, \thread, THREAD_REG25
- stptr.d s3, \thread, THREAD_REG26
- stptr.d s4, \thread, THREAD_REG27
- stptr.d s5, \thread, THREAD_REG28
- stptr.d s6, \thread, THREAD_REG29
- stptr.d s7, \thread, THREAD_REG30
- stptr.d s8, \thread, THREAD_REG31
- stptr.d sp, \thread, THREAD_REG03
- stptr.d fp, \thread, THREAD_REG22
+ LONG_SPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
+ LONG_SPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
+ LONG_SPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
+ LONG_SPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro cpu_restore_nonscratch thread
- ldptr.d s0, \thread, THREAD_REG23
- ldptr.d s1, \thread, THREAD_REG24
- ldptr.d s2, \thread, THREAD_REG25
- ldptr.d s3, \thread, THREAD_REG26
- ldptr.d s4, \thread, THREAD_REG27
- ldptr.d s5, \thread, THREAD_REG28
- ldptr.d s6, \thread, THREAD_REG29
- ldptr.d s7, \thread, THREAD_REG30
- ldptr.d s8, \thread, THREAD_REG31
- ldptr.d ra, \thread, THREAD_REG01
- ldptr.d sp, \thread, THREAD_REG03
- ldptr.d fp, \thread, THREAD_REG22
+ LONG_LPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
+ LONG_LPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
+ LONG_LPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
+ LONG_LPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
+#ifdef CONFIG_32BIT
+ st.w \tmp, \thread, THREAD_FCSR
+#else
stptr.w \tmp, \thread, THREAD_FCSR
+#endif
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
andi \tmp, \tmp, FPU_CSR_TM
@@ -56,7 +68,11 @@
.endm
.macro fpu_restore_csr thread tmp0 tmp1
+#ifdef CONFIG_32BIT
+ ld.w \tmp0, \thread, THREAD_FCSR
+#else
ldptr.w \tmp0, \thread, THREAD_FCSR
+#endif
movgr2fcsr fcsr0, \tmp0
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
@@ -88,9 +104,52 @@
#endif
.endm
+#ifdef CONFIG_32BIT
.macro fpu_save_cc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
- move \tmp1, \tmp0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.w \tmp1, \tmp0, 31, 24
+ st.w \tmp1, \thread, THREAD_FCC
+ movcf2gr \tmp0, $fcc4
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc5
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc6
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc7
+ bstrins.w \tmp1, \tmp0, 31, 24
+ st.w \tmp1, \thread, (THREAD_FCC + 4)
+ .endm
+
+ .macro fpu_restore_cc thread tmp0 tmp1
+ ld.w \tmp0, \thread, THREAD_FCC
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ ld.w \tmp0, \thread, (THREAD_FCC + 4)
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc4, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc5, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc6, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc7, \tmp1
+ .endm
+#else
+ .macro fpu_save_cc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.d \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
@@ -109,7 +168,7 @@
.endm
.macro fpu_restore_cc thread tmp0 tmp1
- ldptr.d \tmp0, \thread, THREAD_FCC
+ ldptr.d \tmp0, \thread, THREAD_FCC
bstrpick.d \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.d \tmp1, \tmp0, 15, 8
@@ -127,6 +186,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
+#endif
.macro fpu_save_double thread tmp
li.w \tmp, THREAD_FPR0
@@ -606,12 +666,14 @@
766:
lu12i.w \reg, 0
ori \reg, \reg, 0
+#ifdef CONFIG_64BIT
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
+#endif
.pushsection ".la_abs", "aw", %progbits
- .p2align 3
- .dword 766b
- .dword \sym
+ .p2align PTRLOG
+ PTR 766b
+ PTR \sym
.popsection
#endif
.endm
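
On the asmmacro.h side, the new 32-bit fpu_save_cc/fpu_restore_cc variants pack the eight condition-flag values into two 32-bit words at byte lanes 7:0, 15:8, 23:16 and 31:24, where the 64-bit variants use one doubleword. A C model of that packing, assuming each flag arrives as a 0/1 byte the way movcf2gr produces it:

#include <stdint.h>

static void pack_fcc(const uint8_t fcc[8], uint32_t out[2])
{
	for (int w = 0; w < 2; w++) {
		out[w] = 0;
		for (int b = 0; b < 4; b++)	/* bstrins.w ..., 8*b+7, 8*b */
			out[w] |= (uint32_t)fcc[4 * w + b] << (8 * b);
	}
}

static void unpack_fcc(const uint32_t in[2], uint8_t fcc[8])
{
	for (int w = 0; w < 2; w++)
		for (int b = 0; b < 4; b++)	/* bstrpick.w ..., 8*b+7, 8*b */
			fcc[4 * w + b] = (in[w] >> (8 * b)) & 0xff;
}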
diff --git a/arch/loongarch/include/asm/atomic-amo.h b/arch/loongarch/include/asm/atomic-amo.h
new file mode 100644
index 000000000000..d5efa5252d56
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic-amo.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations (AMO).
+ *
+ * Copyright (C) 2020-2025 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_ATOMIC_AMO_H
+#define _ASM_ATOMIC_AMO_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op".w" " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC_OPS(add, i, add, +)
+ATOMIC_OPS(sub, -i, add, +)
+
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_OP(op, I, asm_op) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op".d " " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
+{ \
+ long result; \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
+{ \
+ long result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_OPS(op, I, asm_op, c_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC64_OPS(add, i, add, +)
+ATOMIC64_OPS(sub, -i, add, +)
+
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+
+#define ATOMIC64_OPS(op, I, asm_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC64_OPS(and, i, and)
+ATOMIC64_OPS(or, i, or)
+ATOMIC64_OPS(xor, i, xor)
+
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+#endif
+
+#endif /* _ASM_ATOMIC_AMO_H */
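For orientation amid the macro layers: a minimal sketch of what ATOMIC_OP(add, i, add) in this header expands to (the expansion itself is not part of the patch; atomic_t is the usual kernel type):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	/* "am" "add" ".w" pastes to a single amadd.w; the old value
	 * is discarded into $zero, so no result register is needed. */
	__asm__ __volatile__(
	"amadd.w $zero, %1, %0 \n"
	: "+ZB" (v->counter)
	: "r" (i)
	: "memory");
}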
diff --git a/arch/loongarch/include/asm/atomic-llsc.h b/arch/loongarch/include/asm/atomic-llsc.h
new file mode 100644
index 000000000000..3ce500c88272
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic-llsc.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations (LLSC).
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_ATOMIC_LLSC_H
+#define _ASM_ATOMIC_LLSC_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %0, %1 #atomic_" #op " \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " sc.w %0, %1 \n" \
+ " beq %0, $r0, 1b \n" \
+ :"=&r" (temp) , "+ZC"(v->counter) \
+ :"r" (I) \
+ ); \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc.w %0, %2 \n" \
+ " beq %0, $r0 ,1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ : "=&r" (result), "=&r" (temp), "+ZC"(v->counter) \
+ : "r" (I)); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc.w %0, %2 \n" \
+ " beq %0, $r0 ,1b \n" \
+ " add.w %0, %1 ,$r0 \n" \
+ : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) \
+ : "r" (I)); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(add, i, add.w, +=)
+ATOMIC_OPS(sub, -i, add.w, +=)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#ifdef CONFIG_64BIT
+#error "64-bit LLSC atomic operations are not supported"
+#endif
+
+#endif /* _ASM_ATOMIC_LLSC_H */
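The same operation under this header becomes a retry loop; a sketch (not part of the patch) of what ATOMIC_OP(add, i, add.w) expands to:

static inline void arch_atomic_add(int i, atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"1: ll.w %0, %1 # atomic_add \n"	/* load-linked current value */
	" add.w %0, %0, %2 \n"
	" sc.w %0, %1 \n"			/* store-conditional, writes 0 on failure */
	" beq %0, $r0, 1b \n"			/* reservation lost: retry */
	: "=&r" (temp), "+ZC" (v->counter)
	: "r" (i));
}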
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index c86f0ab922ec..444b9ddcd004 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -11,6 +11,16 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
+#ifdef CONFIG_CPU_HAS_AMO
+#include <asm/atomic-amo.h>
+#else
+#include <asm/atomic-llsc.h>
+#endif
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
#if __SIZEOF_LONG__ == 4
#define __LL "ll.w "
#define __SC "sc.w "
@@ -34,100 +44,6 @@
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC_OP(op, I, asm_op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
-{ \
- __asm__ __volatile__( \
- "am"#asm_op".w" " $zero, %1, %0 \n" \
- : "+ZB" (v->counter) \
- : "r" (I) \
- : "memory"); \
-}
-
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
-static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
-{ \
- int result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".w" " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result c_op I; \
-}
-
-#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
-static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
-{ \
- int result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".w" " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result; \
-}
-
-#define ATOMIC_OPS(op, I, asm_op, c_op) \
- ATOMIC_OP(op, I, asm_op) \
- ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
- ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
- ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC_OPS(add, i, add, +)
-ATOMIC_OPS(sub, -i, add, +)
-
-#define arch_atomic_add_return arch_atomic_add_return
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
-#define arch_atomic_sub_return arch_atomic_sub_return
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-
-#undef ATOMIC_OPS
-
-#define ATOMIC_OPS(op, I, asm_op) \
- ATOMIC_OP(op, I, asm_op) \
- ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC_OPS(and, i, and)
-ATOMIC_OPS(or, i, or)
-ATOMIC_OPS(xor, i, xor)
-
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -194,99 +110,6 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC64_OP(op, I, asm_op) \
-static inline void arch_atomic64_##op(long i, atomic64_t *v) \
-{ \
- __asm__ __volatile__( \
- "am"#asm_op".d " " $zero, %1, %0 \n" \
- : "+ZB" (v->counter) \
- : "r" (I) \
- : "memory"); \
-}
-
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
-static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
-{ \
- long result; \
- __asm__ __volatile__( \
- "am"#asm_op#mb".d " " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result c_op I; \
-}
-
-#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
-static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
-{ \
- long result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".d " " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, I, asm_op, c_op) \
- ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
- ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
- ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC64_OPS(add, i, add, +)
-ATOMIC64_OPS(sub, -i, add, +)
-
-#define arch_atomic64_add_return arch_atomic64_add_return
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-
-#define ATOMIC64_OPS(op, I, asm_op) \
- ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC64_OPS(and, i, and)
-ATOMIC64_OPS(or, i, or)
-ATOMIC64_OPS(xor, i, xor)
-
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h
index 69e00f8d8034..411106bf9902 100644
--- a/arch/loongarch/include/asm/bitops.h
+++ b/arch/loongarch/include/asm/bitops.h
@@ -13,11 +13,22 @@
#include <asm/barrier.h>
+#ifdef CONFIG_32BIT_REDUCED
+
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+
+#else /* CONFIG_32BIT_STANDARD || CONFIG_64BIT */
+
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-fls.h>
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
+#endif
+
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
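The asm-generic ffs()/fls() selected for CONFIG_32BIT_REDUCED are plain-C shift cascades instead of the compiler builtins; roughly (simplified from asm-generic/bitops/ffs.h, shown only to illustrate the cost; generic_ffs is a stand-in name):

static inline int generic_ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) { x >>= 16; r += 16; }
	if (!(x & 0xff))   { x >>= 8;  r += 8;  }
	if (!(x & 0xf))    { x >>= 4;  r += 4;  }
	if (!(x & 3))      { x >>= 2;  r += 2;  }
	if (!(x & 1))      { r += 1;            }
	return r;
}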
diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h
index 46f275b9cdf7..757738ea38d7 100644
--- a/arch/loongarch/include/asm/bitrev.h
+++ b/arch/loongarch/include/asm/bitrev.h
@@ -11,7 +11,7 @@ static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
{
u32 ret;
- asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+ asm("bitrev.w %0, %1" : "=r"(ret) : "r"(x));
return ret;
}
diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include/asm/checksum.h
index cabbf6af44c4..cc2754e0aa25 100644
--- a/arch/loongarch/include/asm/checksum.h
+++ b/arch/loongarch/include/asm/checksum.h
@@ -9,6 +9,8 @@
#include <linux/bitops.h>
#include <linux/in6.h>
+#ifdef CONFIG_64BIT
+
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
@@ -61,6 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
+#endif
+
#include <asm-generic/checksum.h>
#endif /* __ASM_CHECKSUM_H */
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 979fde61bba8..0494c2ab553e 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -9,17 +9,33 @@
#include <linux/build_bug.h>
#include <asm/barrier.h>
-#define __xchg_asm(amswap_db, m, val) \
+#define __xchg_amo_asm(amswap_db, m, val) \
({ \
- __typeof(val) __ret; \
+ __typeof(val) __ret; \
\
- __asm__ __volatile__ ( \
- " "amswap_db" %1, %z2, %0 \n" \
- : "+ZB" (*m), "=&r" (__ret) \
- : "Jr" (val) \
- : "memory"); \
+ __asm__ __volatile__ ( \
+ " "amswap_db" %1, %z2, %0 \n" \
+ : "+ZB" (*m), "=&r" (__ret) \
+ : "Jr" (val) \
+ : "memory"); \
\
- __ret; \
+ __ret; \
+})
+
+#define __xchg_llsc_asm(ld, st, m, val) \
+({ \
+ __typeof(val) __ret, __tmp; \
+ \
+ asm volatile ( \
+ "1: ll.w %0, %3 \n" \
+ " move %1, %z4 \n" \
+ " sc.w %1, %2 \n" \
+ " beqz %1, 1b \n" \
+ : "=&r" (__ret), "=&r" (__tmp), "=ZC" (*m) \
+ : "ZC" (*m), "Jr" (val) \
+ : "memory"); \
+ \
+ __ret; \
})
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
@@ -67,13 +83,23 @@ __arch_xchg(volatile void *ptr, unsigned long x, int size)
switch (size) {
case 1:
case 2:
- return __xchg_small(ptr, x, size);
+ return __xchg_small((volatile void *)ptr, x, size);
case 4:
- return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+#ifdef CONFIG_CPU_HAS_AMO
+ return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+#else
+ return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x);
+#endif /* CONFIG_CPU_HAS_AMO */
+#ifdef CONFIG_64BIT
case 8:
- return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+#ifdef CONFIG_CPU_HAS_AMO
+ return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+#else
+ return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x);
+#endif /* CONFIG_CPU_HAS_AMO */
+#endif /* CONFIG_64BIT */
default:
BUILD_BUG();
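Net effect of the dispatch above, written out as a hypothetical helper (not in the patch): a 4-byte exchange is one amswap_db.w on AMO-capable cores and an ll.w/sc.w retry loop otherwise.

static inline u32 xchg32_sketch(volatile u32 *p, u32 val)
{
#ifdef CONFIG_CPU_HAS_AMO
	return __xchg_amo_asm("amswap_db.w", p, val);	/* one insn, full barrier */
#else
	return __xchg_llsc_asm("ll.w", "sc.w", p, val);	/* LL/SC retry loop */
#endif
}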
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index bd5f0457ad21..3745d991a99a 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -20,16 +20,13 @@
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
#ifdef CONFIG_32BIT
-# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31
# define cpu_pabits 31
#endif
#ifdef CONFIG_64BIT
-# define cpu_has_64bits 1
# define cpu_vabits cpu_data[0].vabits
# define cpu_pabits cpu_data[0].pabits
-# define __NEED_ADDRBITS_PROBE
#endif
/*
diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h
index 605493417753..11bb3c8a7179 100644
--- a/arch/loongarch/include/asm/dmi.h
+++ b/arch/loongarch/include/asm/dmi.h
@@ -12,7 +12,7 @@
#define dmi_early_unmap(x, l) dmi_unmap(x)
#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
-static inline void *dmi_remap(u64 phys_addr, unsigned long size)
+static inline void *dmi_remap(phys_addr_t phys_addr, unsigned long size)
{
return ((void *)TO_CACHE(phys_addr));
}
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index f16bd42456e4..912c50cdd6b7 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -120,6 +120,36 @@
#define R_LARCH_ADD_ULEB128 107
#define R_LARCH_SUB_ULEB128 108
#define R_LARCH_64_PCREL 109
+#define R_LARCH_CALL36 110
+#define R_LARCH_TLS_DESC_PC_HI20 111
+#define R_LARCH_TLS_DESC_PC_LO12 112
+#define R_LARCH_TLS_DESC64_PC_LO20 113
+#define R_LARCH_TLS_DESC64_PC_HI12 114
+#define R_LARCH_TLS_DESC_HI20 115
+#define R_LARCH_TLS_DESC_LO12 116
+#define R_LARCH_TLS_DESC64_LO20 117
+#define R_LARCH_TLS_DESC64_HI12 118
+#define R_LARCH_TLS_DESC_LD 119
+#define R_LARCH_TLS_DESC_CALL 120
+#define R_LARCH_TLS_LE_HI20_R 121
+#define R_LARCH_TLS_LE_ADD_R 122
+#define R_LARCH_TLS_LE_LO12_R 123
+#define R_LARCH_TLS_LD_PCREL20_S2 124
+#define R_LARCH_TLS_GD_PCREL20_S2 125
+#define R_LARCH_TLS_DESC_PCREL20_S2 126
+#define R_LARCH_CALL30 127
+#define R_LARCH_PCADD_HI20 128
+#define R_LARCH_PCADD_LO12 129
+#define R_LARCH_GOT_PCADD_HI20 130
+#define R_LARCH_GOT_PCADD_LO12 131
+#define R_LARCH_TLS_IE_PCADD_HI20 132
+#define R_LARCH_TLS_IE_PCADD_LO12 133
+#define R_LARCH_TLS_LD_PCADD_HI20 134
+#define R_LARCH_TLS_LD_PCADD_LO12 135
+#define R_LARCH_TLS_GD_PCADD_HI20 136
+#define R_LARCH_TLS_GD_PCADD_LO12 137
+#define R_LARCH_TLS_DESC_PCADD_HI20 138
+#define R_LARCH_TLS_DESC_PCADD_LO12 139
#ifndef ELF_ARCH
@@ -156,6 +186,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs);
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#ifdef CONFIG_32BIT
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 55e64a12a124..f9f207082d0e 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -438,8 +438,10 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
- /* st.d $ra, $sp, offset */
- return ip->reg2i12_format.opcode == std_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? stw_op : std_op;
+
+ /* st.w / st.d $ra, $sp, offset */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
@@ -447,8 +449,10 @@ static inline bool is_ra_save_ins(union loongarch_instruction *ip)
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
- /* addi.d $sp, $sp, -imm */
- return ip->reg2i12_format.opcode == addid_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op;
+
+ /* addi.w / addi.d $sp, $sp, -imm */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 12bd15578c33..3943647503a9 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -50,10 +50,22 @@ void spurious_interrupt(void);
#define NR_LEGACY_VECTORS 16
#define IRQ_MATRIX_BITS NR_VECTORS
+#define AVEC_IRQ_SHIFT 4
+#define AVEC_IRQ_BIT 8
+#define AVEC_IRQ_MASK GENMASK(AVEC_IRQ_BIT - 1, 0)
+#define AVEC_CPU_SHIFT 12
+#define AVEC_CPU_BIT 16
+#define AVEC_CPU_MASK GENMASK(AVEC_CPU_BIT - 1, 0)
+
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
+#ifdef CONFIG_32BIT
+#define MAX_IO_PICS 1
+#else
#define MAX_IO_PICS 8
+#endif
+
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 4000c7603d8e..dcaecf69ea5a 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -10,15 +10,23 @@
#ifndef __ASSEMBLER__
#include <linux/types.h>
+#include <linux/stringify.h>
+#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 4
+#ifdef CONFIG_32BIT
+#define JUMP_LABEL_TYPE ".long "
+#else
+#define JUMP_LABEL_TYPE ".quad "
+#endif
+
/* This macro is also expanded on the Rust side. */
#define JUMP_TABLE_ENTRY(key, label) \
".pushsection __jump_table, \"aw\" \n\t" \
- ".align 3 \n\t" \
+ ".align " __stringify(PTRLOG) " \n\t" \
".long 1b - ., " label " - . \n\t" \
- ".quad " key " - . \n\t" \
+ JUMP_LABEL_TYPE key " - . \n\t" \
".popsection \n\t"
#define ARCH_STATIC_BRANCH_ASM(key, label) \
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
index f53ea653af76..2d118b1a060e 100644
--- a/arch/loongarch/include/asm/local.h
+++ b/arch/loongarch/include/asm/local.h
@@ -8,6 +8,7 @@
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
+#include <asm/asm.h>
#include <asm/cmpxchg.h>
typedef struct {
@@ -27,6 +28,7 @@ typedef struct {
/*
* Same as above, but return the result value
*/
+#ifdef CONFIG_CPU_HAS_AMO
static inline long local_add_return(long i, local_t *l)
{
unsigned long result;
@@ -55,6 +57,41 @@ static inline long local_sub_return(long i, local_t *l)
return result;
}
+#else
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long result, temp;
+
+ __asm__ __volatile__(
+ "1:" __LL "%1, %2 # local_add_return \n"
+ __stringify(LONG_ADD) " %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beq %0, $r0, 1b \n"
+ __stringify(LONG_ADD) " %0, %1, %3 \n"
+ : "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter)
+ : "r" (i), "ZC" (l->a.counter)
+ : "memory");
+
+ return result;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long result, temp;
+
+ __asm__ __volatile__(
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ __stringify(LONG_SUB) " %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beq %0, $r0, 1b \n"
+ __stringify(LONG_SUB) " %0, %1, %3 \n"
+ : "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter)
+ : "r" (i), "ZC" (l->a.counter)
+ : "memory");
+
+ return result;
+}
+#endif
static inline long local_cmpxchg(local_t *l, long old, long new)
{
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 58a4a3b6b035..e6b8ff61c8cc 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -182,6 +182,16 @@
#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg)
#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg)
+#ifdef CONFIG_32BIT
+#define csr_read(reg) csr_read32(reg)
+#define csr_write(val, reg) csr_write32(val, reg)
+#define csr_xchg(val, mask, reg) csr_xchg32(val, mask, reg)
+#else
+#define csr_read(reg) csr_read64(reg)
+#define csr_write(val, reg) csr_write64(val, reg)
+#define csr_xchg(val, mask, reg) csr_xchg64(val, mask, reg)
+#endif
+
/* IOCSR */
#define iocsr_read32(reg) __iocsrrd_w(reg)
#define iocsr_read64(reg) __iocsrrd_d(reg)
@@ -904,6 +914,26 @@
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
/* Direct Map window 0/1/2/3 */
+
+#ifdef CONFIG_32BIT
+
+#define CSR_DMW0_PLV0 (1 << 0)
+#define CSR_DMW0_VSEG (0x4)
+#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
+#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
+
+#define CSR_DMW1_PLV0 (1 << 0)
+#define CSR_DMW1_MAT (1 << 4)
+#define CSR_DMW1_VSEG (0x5)
+#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
+#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+
+#define CSR_DMW2_INIT 0x0
+
+#define CSR_DMW3_INIT 0x0
+
+#else
+
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -923,6 +953,8 @@
#define CSR_DMW3_INIT 0x0
+#endif
+
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -1208,7 +1240,35 @@
#ifndef __ASSEMBLER__
-static __always_inline u64 drdtime(void)
+#ifdef CONFIG_32BIT
+
+static __always_inline u32 rdtime_h(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__(
+ "rdtimeh.w %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+ return val;
+}
+
+static __always_inline u32 rdtime_l(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__(
+ "rdtimel.w %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+ return val;
+}
+
+#else
+
+static __always_inline u64 rdtime_d(void)
{
u64 val = 0;
@@ -1220,11 +1280,14 @@ static __always_inline u64 drdtime(void)
return val;
}
+#endif
+
static inline unsigned int get_csr_cpuid(void)
{
return csr_read32(LOONGARCH_CSR_CPUID);
}
+#ifdef CONFIG_64BIT
static inline void csr_any_send(unsigned int addr, unsigned int data,
unsigned int data_mask, unsigned int cpu)
{
@@ -1236,6 +1299,7 @@ static inline void csr_any_send(unsigned int addr, unsigned int data,
val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
}
+#endif
static inline unsigned int read_csr_excode(void)
{
@@ -1259,22 +1323,22 @@ static inline void write_csr_pagesize(unsigned int size)
static inline unsigned int read_csr_tlbrefill_pagesize(void)
{
- return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
+ return (csr_read(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
}
static inline void write_csr_tlbrefill_pagesize(unsigned int size)
{
- csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
+ csr_xchg(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
}
#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID)
#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID)
-#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI)
-#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI)
-#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0)
-#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0)
-#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1)
-#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1)
+#define read_csr_entryhi() csr_read(LOONGARCH_CSR_TLBEHI)
+#define write_csr_entryhi(val) csr_write(val, LOONGARCH_CSR_TLBEHI)
+#define read_csr_entrylo0() csr_read(LOONGARCH_CSR_TLBELO0)
+#define write_csr_entrylo0(val) csr_write(val, LOONGARCH_CSR_TLBELO0)
+#define read_csr_entrylo1() csr_read(LOONGARCH_CSR_TLBELO1)
+#define write_csr_entrylo1(val) csr_write(val, LOONGARCH_CSR_TLBELO1)
#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG)
#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG)
#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT)
@@ -1284,20 +1348,20 @@ static inline void write_csr_tlbrefill_pagesize(unsigned int size)
#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN)
#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN)
#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID)
-#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1)
-#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1)
-#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2)
-#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2)
-#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3)
-#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3)
+#define read_csr_prcfg1() csr_read(LOONGARCH_CSR_PRCFG1)
+#define write_csr_prcfg1(val) csr_write(val, LOONGARCH_CSR_PRCFG1)
+#define read_csr_prcfg2() csr_read(LOONGARCH_CSR_PRCFG2)
+#define write_csr_prcfg2(val) csr_write(val, LOONGARCH_CSR_PRCFG2)
+#define read_csr_prcfg3() csr_read(LOONGARCH_CSR_PRCFG3)
+#define write_csr_prcfg3(val) csr_write(val, LOONGARCH_CSR_PRCFG3)
#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE)
#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG)
#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG)
#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR)
-#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1)
-#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1)
-#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2)
+#define read_csr_impctl1() csr_read(LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl1(val) csr_write(val, LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl2(val) csr_write(val, LOONGARCH_CSR_IMPCTL2)
#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0)
#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0)
@@ -1378,8 +1442,10 @@ __BUILD_CSR_OP(tlbidx)
#define ENTRYLO_C_SHIFT 4
#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
#define ENTRYLO_G (_ULCAST_(1) << 6)
+#ifdef CONFIG_64BIT
#define ENTRYLO_NR (_ULCAST_(1) << 61)
#define ENTRYLO_NX (_ULCAST_(1) << 62)
+#endif
/* Values for PageSize register */
#define PS_4K 0x0000000c
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
index f33f3fd32ecc..d56a968273de 100644
--- a/arch/loongarch/include/asm/module.h
+++ b/arch/loongarch/include/asm/module.h
@@ -38,8 +38,10 @@ struct got_entry {
struct plt_entry {
u32 inst_lu12iw;
+#ifdef CONFIG_64BIT
u32 inst_lu32id;
u32 inst_lu52id;
+#endif
u32 inst_jirl;
};
@@ -57,6 +59,14 @@ static inline struct got_entry emit_got_entry(Elf_Addr val)
static inline struct plt_entry emit_plt_entry(unsigned long val)
{
+#ifdef CONFIG_32BIT
+ u32 lu12iw, jirl;
+
+ lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
+ jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
+
+ return (struct plt_entry) { lu12iw, jirl };
+#else
u32 lu12iw, lu32id, lu52id, jirl;
lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
@@ -65,6 +75,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val)
jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
+#endif
}
static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index a3aaf34fba16..256d1ff7a1e3 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -10,7 +10,7 @@
#include <vdso/page.h>
-#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 87be9b14e9da..583f2466262f 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -13,7 +13,7 @@
* the loading address of main kernel image, but far from where the modules are
* loaded. Tell the compiler this fact when using explicit relocs.
*/
-#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
+#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) && defined(CONFIG_64BIT)
# if __has_attribute(model)
# define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
# else
@@ -27,7 +27,7 @@ register unsigned long __my_cpu_offset __asm__("$r21");
static inline void set_my_cpu_offset(unsigned long off)
{
__my_cpu_offset = off;
- csr_write64(off, PERCPU_BASE_KS);
+ csr_write(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset \
@@ -36,6 +36,8 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset; \
})
+#ifdef CONFIG_CPU_HAS_AMO
+
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
@@ -68,25 +70,9 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- case 2:
- return __xchg_small((volatile void *)ptr, val, size);
-
- case 4:
- return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
-
- case 8:
- return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+#endif
- default:
- BUILD_BUG();
- }
-
- return 0;
-}
+#ifdef CONFIG_64BIT
#define __pcpu_op_1(op) op ".b "
#define __pcpu_op_2(op) op ".h "
@@ -115,6 +101,10 @@ do { \
: "memory"); \
} while (0)
+#endif
+
+#define __percpu_xchg __arch_xchg
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -135,6 +125,8 @@ do { \
__retval; \
})
+#ifdef CONFIG_CPU_HAS_AMO
+
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
@@ -146,9 +138,6 @@ do { \
#define _percpu_or(pcp, val) \
_pcp_protect(__percpu_or, pcp, val)
-#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
- _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
-
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
@@ -161,6 +150,10 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+#endif
+
+#ifdef CONFIG_64BIT
+
#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
@@ -171,6 +164,11 @@ do { \
#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
+#endif
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
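Callers are unaffected by the __percpu_xchg rerouting; a usage sketch (demo_flag is a hypothetical variable):

DEFINE_PER_CPU(unsigned long, demo_flag);

static unsigned long demo_take_flag(void)
{
	/* now goes through __arch_xchg, so it also works on !AMO cores */
	return this_cpu_xchg(demo_flag, 0);
}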
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 2fc3789220ac..b565573cd82e 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -6,6 +6,26 @@
#define _ASM_PGTABLE_BITS_H
/* Page table bits */
+
+#ifdef CONFIG_32BIT
+#define _PAGE_VALID_SHIFT 0
+#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
+#define _PAGE_DIRTY_SHIFT 1
+#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
+#define _CACHE_SHIFT 4 /* 4~5, two bits */
+#define _PAGE_GLOBAL_SHIFT 6
+#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
+#define _PAGE_PRESENT_SHIFT 7
+#define _PAGE_PFN_SHIFT 8
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
+#define _PAGE_SWP_EXCLUSIVE_SHIFT 13
+#define _PAGE_PFN_END_SHIFT 28
+#define _PAGE_WRITE_SHIFT 29
+#define _PAGE_MODIFIED_SHIFT 30
+#define _PAGE_PRESENT_INVALID_SHIFT 31
+#endif
+
+#ifdef CONFIG_64BIT
#define _PAGE_VALID_SHIFT 0
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT 1
@@ -18,14 +38,15 @@
#define _PAGE_MODIFIED_SHIFT 9
#define _PAGE_PROTNONE_SHIFT 10
#define _PAGE_SPECIAL_SHIFT 11
-#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_PFN_SHIFT 12
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
#define _PAGE_RPLV_SHIFT 63
+#endif
/* Used by software */
#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
@@ -33,10 +54,15 @@
#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+#ifdef CONFIG_32BIT
+#define _PAGE_PROTNONE 0
+#define _PAGE_SPECIAL 0
+#else
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#endif
-/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
+/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
/* Used by TLB hardware (placed in EntryLo*) */
@@ -46,9 +72,15 @@
#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
+#ifdef CONFIG_32BIT
+#define _PAGE_NO_READ 0
+#define _PAGE_NO_EXEC 0
+#define _PAGE_RPLV 0
+#else
#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
+#endif
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 03fb60432fde..f41a648a3d9e 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <asm/addrspace.h>
+#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
@@ -23,37 +24,45 @@
#endif
#if CONFIG_PGTABLE_LEVELS == 2
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
+#ifdef CONFIG_32BIT
+#define VA_BITS 32
+#else
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
+#endif
-#define PTRS_PER_PGD (PAGE_SIZE >> 3)
+#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
-#define PTRS_PER_PUD (PAGE_SIZE >> 3)
+#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-#define PTRS_PER_PMD (PAGE_SIZE >> 3)
+#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
#endif
-#define PTRS_PER_PTE (PAGE_SIZE >> 3)
+#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
+#ifdef CONFIG_32BIT
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#else
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+#endif
#ifndef __ASSEMBLER__
@@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-/*
- * TLB refill handlers may also map the vmalloc area into xkvrange.
- * Avoid the first couple of pages so NULL pointer dereferences will
- * still reliably trap.
- */
+#ifdef CONFIG_32BIT
+
+#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
+
+#endif
+
+#ifdef CONFIG_64BIT
+
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
@@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#endif
+
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
@@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr);
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
*
- * Format of swap PTEs:
+ * Format of 32bit swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------ offset -------------> E <- type -> <-- zeroes -->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
+ *
+ * Format of 64bit swap PTEs:
*
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
@@ -290,16 +314,27 @@ extern void kernel_pte_init(void *addr);
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
*/
+
+#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
+{
+ pte_t pte;
+ pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
+ return pte;
+}
-#define __swp_type(x) (((x).val >> 16) & 0x7f)
-#define __swp_offset(x) ((x).val >> 24)
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
+
+#define __swp_entry_to_pte(x) __pte((x).val)
+#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
-#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
static inline bool pte_swp_exclusive(pte_t pte)
{
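A worked instance of the 32-bit encoding above (illustrative; the numbers follow from __SWP_TYPE_SHIFT = 8, __SWP_TYPE_BITS = 5, __SWP_OFFSET_SHIFT = 14):

	swp_entry_t e = __swp_entry(3, 0x123);
	/* e.val == (3 << 8) | (0x123 << 14): type in bits 8..12,
	 * exclusive marker (bit 13) clear, offset from bit 14 up */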
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 5cb568a60cf8..ecc8e50fffa8 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -38,22 +38,42 @@
cfi_restore \reg \offset \docfi
.endm
+ .macro SETUP_TWINS temp
+ pcaddi t0, 0
+ PTR_LI t1, ~TO_PHYS_MASK
+ and t0, t0, t1
+ ori t0, t0, (1 << 4 | 1)
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ PTR_LI t0, CSR_DMW1_INIT
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+ .endm
+
+ .macro SETUP_MODES temp
+ /* Enable PG */
+ li.w \temp, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr \temp, LOONGARCH_CSR_CRMD
+ li.w \temp, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr \temp, LOONGARCH_CSR_PRMD
+ li.w \temp, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr \temp, LOONGARCH_CSR_EUEN
+ .endm
+
.macro SETUP_DMWINS temp
- li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW0_INIT # SUC, PLV0, LA32: 0x8xxx xxxx, LA64: 0x8000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN0
- li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW1_INIT # CAC, PLV0, LA32: 0xaxxx xxxx, LA64: 0x9000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN1
- li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW2_INIT # WUC, PLV0, LA32: unavailable, LA64: 0xa000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN2
- li.d \temp, CSR_DMW3_INIT # 0x0, unused
+ PTR_LI \temp, CSR_DMW3_INIT # 0x0, unused
csrwr \temp, LOONGARCH_CSR_DMWIN3
.endm
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
- li.d \temp1, CACHE_BASE
+ PTR_LI \temp1, CACHE_BASE
pcaddi \temp2, 0
- bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
+ PTR_BSTRINS \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
@@ -171,7 +191,7 @@
andi t0, t0, 0x3 /* extract pplv bit */
beqz t0, 9f
- li.d tp, ~_THREAD_MASK
+ LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
cfi_st u0, PT_R21, \docfi
csrrd u0, PERCPU_BASE_KS
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index 5bb5a90d2681..bfa3fd879c7f 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -5,6 +5,7 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
+#ifdef CONFIG_64BIT
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
extern void *__memset(void *__s, int __c, size_t __count);
@@ -16,6 +17,7 @@ extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+#endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h
index fb41e9e7a222..9ea52fad9690 100644
--- a/arch/loongarch/include/asm/timex.h
+++ b/arch/loongarch/include/asm/timex.h
@@ -18,7 +18,38 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
- return drdtime();
+#ifdef CONFIG_32BIT
+ return rdtime_l();
+#else
+ return rdtime_d();
+#endif
+}
+
+#ifdef CONFIG_32BIT
+
+#define get_cycles_hi get_cycles_hi
+
+static inline cycles_t get_cycles_hi(void)
+{
+ return rdtime_h();
+}
+
+#endif
+
+static inline u64 get_cycles64(void)
+{
+#ifdef CONFIG_32BIT
+ u32 hi, lo;
+
+ do {
+ hi = rdtime_h();
+ lo = rdtime_l();
+ } while (hi != rdtime_h());
+
+ return ((u64)hi << 32) | lo;
+#else
+ return rdtime_d();
+#endif
}
#endif /* __KERNEL__ */
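The hi/lo/hi loop in get_cycles64() exists because a carry can ripple between the two 32-bit reads; the torn read it prevents, as a sketch:

	hi = rdtime_h();	/* reads 1 */
	/* counter low word wraps 0xffffffff -> 0; hardware hi becomes 2 */
	lo = rdtime_l();	/* reads 0 */
	/* a naive ((u64)hi << 32) | lo yields 0x100000000, low by 2^32 */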
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 0d22991ae430..4e259d490e45 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -19,10 +19,16 @@
#include <asm/asm-extable.h>
#include <asm-generic/access_ok.h>
+#define __LSW 0
+#define __MSW 1
+
extern u64 __ua_limit;
-#define __UA_ADDR ".dword"
+#ifdef CONFIG_64BIT
#define __UA_LIMIT __ua_limit
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
/*
* get_user: - Get a simple variable from user space.
@@ -126,6 +132,7 @@ extern u64 __ua_limit;
*
* Returns zero on success, or -EFAULT on error.
*/
+
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
@@ -146,7 +153,7 @@ do { \
case 1: __get_data_asm(val, "ld.b", ptr); break; \
case 2: __get_data_asm(val, "ld.h", ptr); break; \
case 4: __get_data_asm(val, "ld.w", ptr); break; \
- case 8: __get_data_asm(val, "ld.d", ptr); break; \
+ case 8: __get_data_asm_8(val, ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@@ -167,13 +174,39 @@ do { \
(val) = (__typeof__(*(ptr))) __gu_tmp; \
}
+#ifdef CONFIG_64BIT
+#define __get_data_asm_8(val, ptr) \
+ __get_data_asm(val, "ld.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __get_data_asm_8(val, ptr) \
+{ \
+ u32 __lo, __hi; \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " ld.w %1, %3 \n" \
+ "2:\n" \
+ " ld.w %2, %4 \n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+ : "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+ if (__gu_err) \
+ __hi = 0; \
+ (val) = (__typeof__(val))((__typeof__((val)-(val))) \
+ ((((u64)__hi << 32) | __lo))); \
+}
+#endif /* CONFIG_64BIT */
+
#define __put_user_common(ptr, size) \
do { \
switch (size) { \
case 1: __put_data_asm("st.b", ptr); break; \
case 2: __put_data_asm("st.h", ptr); break; \
case 4: __put_data_asm("st.w", ptr); break; \
- case 8: __put_data_asm("st.d", ptr); break; \
+ case 8: __put_data_asm_8(ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@@ -190,6 +223,30 @@ do { \
: "Jr" (__pu_val)); \
}
+#ifdef CONFIG_64BIT
+#define __put_data_asm_8(ptr) \
+ __put_data_asm("st.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __put_data_asm_8(ptr) \
+{ \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \
+ \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " st.w %z3, %1 \n" \
+ "2:\n" \
+ " st.w %z4, %2 \n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
+ : "+r" (__pu_err), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32)); \
+}
+#endif /* CONFIG_64BIT */
+
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __gu_err = 0; \
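With the split 8-byte accessors, a 64-bit get_user() keeps working on 32-bit kernels; a usage sketch (read_u64_sketch is hypothetical):

static int read_u64_sketch(u64 __user *uptr, u64 *out)
{
	u64 val;

	if (get_user(val, uptr))	/* nonzero on fault */
		return -EFAULT;

	*out = val;
	return 0;
}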
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index dcafabca9bb6..bae76767c693 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -12,6 +12,8 @@
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline long gettimeofday_fallback(
@@ -89,6 +91,8 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
+
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 517761419999..89ac01faa5ae 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
syscall-y += unistd_64.h
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index 215e0f9e8aa3..b35c794323bc 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -61,8 +61,13 @@ struct user_lbt_state {
struct user_watch_state {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[8];
@@ -71,8 +76,13 @@ struct user_watch_state {
struct user_watch_state_v2 {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[14];
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index 1f01980f9c94..e19c7f2f9f87 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,3 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 32
+#include <asm/unistd_32.h>
+#else
#include <asm/unistd_64.h>
+#endif
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
index ab7d9baa2915..cd46c2b69c7f 100644
--- a/arch/loongarch/kernel/Makefile.syscalls
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# No special ABIs on loongarch so far
+syscall_abis_32 +=
syscall_abis_64 +=
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index a2060a24b39f..08a227034042 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base);
static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
{
-#ifdef __NEED_ADDRBITS_PROBE
+#ifdef CONFIG_32BIT
+ c->pabits = cpu_pabits;
+ c->vabits = cpu_vabits;
+ vm_map_base = KVRANGE;
+#else
c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
vm_map_base = 0UL - (1UL << c->vabits);
@@ -298,8 +302,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
return;
}
+#ifdef CONFIG_64BIT
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+#else
+ *vendor = iocsr_read32(LOONGARCH_IOCSR_VENDOR) |
+ (u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32;
+ *cpuname = iocsr_read32(LOONGARCH_IOCSR_CPUNAME) |
+ (u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32;
+#endif
if (!__cpu_full_name[cpu]) {
if (((char *)vendor)[0] == 0)
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index ba0bdbf86aa8..6df56241cb95 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -9,7 +9,11 @@
.macro __EFI_PE_HEADER
.long IMAGE_NT_SIGNATURE
.Lcoff_header:
+#ifdef CONFIG_32BIT
+ .short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */
+#else
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
+#endif
.short .Lsection_count /* NumberOfSections */
.long 0 /* TimeDateStamp */
.long 0 /* PointerToSymbolTable */
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 860a3bc030e0..52c21c895318 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -115,7 +115,9 @@ void __init efi_init(void)
efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
- set_bit(EFI_64BIT, &efi.flags);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_bit(EFI_64BIT, &efi.flags);
+
efi_nr_tables = efi_systab->nr_tables;
efi_config_table = (unsigned long)efi_systab->tables;
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 47e1db9a1ce4..b53d333a7c42 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -23,24 +23,24 @@ SYM_CODE_START(handle_syscall)
UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
- add.d t1, t1, t0
+ PTR_ADD t1, t1, t0
move t2, sp
- ld.d sp, t1, 0
+ PTR_L sp, t1, 0
- addi.d sp, sp, -PT_SIZE
+ PTR_ADDI sp, sp, -PT_SIZE
cfi_st t2, PT_R3
cfi_rel_offset sp, PT_R3
- st.d zero, sp, PT_R0
+ LONG_S zero, sp, PT_R0
csrrd t2, LOONGARCH_CSR_PRMD
- st.d t2, sp, PT_PRMD
+ LONG_S t2, sp, PT_PRMD
csrrd t2, LOONGARCH_CSR_CRMD
- st.d t2, sp, PT_CRMD
+ LONG_S t2, sp, PT_CRMD
csrrd t2, LOONGARCH_CSR_EUEN
- st.d t2, sp, PT_EUEN
+ LONG_S t2, sp, PT_EUEN
csrrd t2, LOONGARCH_CSR_ECFG
- st.d t2, sp, PT_ECFG
+ LONG_S t2, sp, PT_ECFG
csrrd t2, LOONGARCH_CSR_ESTAT
- st.d t2, sp, PT_ESTAT
+ LONG_S t2, sp, PT_ESTAT
cfi_st ra, PT_R1
cfi_st a0, PT_R4
cfi_st a1, PT_R5
@@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall)
cfi_st a6, PT_R10
cfi_st a7, PT_R11
csrrd ra, LOONGARCH_CSR_ERA
- st.d ra, sp, PT_ERA
+ LONG_S ra, sp, PT_ERA
cfi_rel_offset ra, PT_ERA
cfi_st tp, PT_R2
@@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall)
#endif
move u0, t0
- li.d tp, ~_THREAD_MASK
+ LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
move a0, sp
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 23bd5ae2212c..841206fde3ab 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void)
clk = of_clk_get(np, 0);
of_node_put(np);
+ cpu_clock_freq = 200 * 1000 * 1000;
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ pr_warn("No valid CPU clock freq, assume 200MHz.\n");
return -ENODEV;
+ }
cpu_clock_freq = clk_get_rate(clk);
clk_put(clk);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 28caf416ae36..f225dcc5b530 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -96,6 +96,49 @@
EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
.endm
+#ifdef CONFIG_32BIT
+ .macro sc_save_fcc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, THREAD_FCC
+ movcf2gr \tmp0, $fcc4
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc5
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc6
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc7
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, (THREAD_FCC + 4)
+ .endm
+
+ .macro sc_restore_fcc thread tmp0 tmp1
+ EX ld.w \tmp0, \thread, THREAD_FCC
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ EX ld.w \tmp0, \thread, (THREAD_FCC + 4)
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc4, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc5, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc6, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc7, \tmp1
+ .endm
+#else
.macro sc_save_fcc base, tmp0, tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
@@ -135,6 +178,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
+#endif
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
@@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu)
li.w t1, -1 # SNaN
+#ifdef CONFIG_32BIT
+ movgr2fr.w $f0, t1
+ movgr2frh.w $f0, t1
+ movgr2fr.w $f1, t1
+ movgr2frh.w $f1, t1
+ movgr2fr.w $f2, t1
+ movgr2frh.w $f2, t1
+ movgr2fr.w $f3, t1
+ movgr2frh.w $f3, t1
+ movgr2fr.w $f4, t1
+ movgr2frh.w $f4, t1
+ movgr2fr.w $f5, t1
+ movgr2frh.w $f5, t1
+ movgr2fr.w $f6, t1
+ movgr2frh.w $f6, t1
+ movgr2fr.w $f7, t1
+ movgr2frh.w $f7, t1
+ movgr2fr.w $f8, t1
+ movgr2frh.w $f8, t1
+ movgr2fr.w $f9, t1
+ movgr2frh.w $f9, t1
+ movgr2fr.w $f10, t1
+ movgr2frh.w $f10, t1
+ movgr2fr.w $f11, t1
+ movgr2frh.w $f11, t1
+ movgr2fr.w $f12, t1
+ movgr2frh.w $f12, t1
+ movgr2fr.w $f13, t1
+ movgr2frh.w $f13, t1
+ movgr2fr.w $f14, t1
+ movgr2frh.w $f14, t1
+ movgr2fr.w $f15, t1
+ movgr2frh.w $f15, t1
+ movgr2fr.w $f16, t1
+ movgr2frh.w $f16, t1
+ movgr2fr.w $f17, t1
+ movgr2frh.w $f17, t1
+ movgr2fr.w $f18, t1
+ movgr2frh.w $f18, t1
+ movgr2fr.w $f19, t1
+ movgr2frh.w $f19, t1
+ movgr2fr.w $f20, t1
+ movgr2frh.w $f20, t1
+ movgr2fr.w $f21, t1
+ movgr2frh.w $f21, t1
+ movgr2fr.w $f22, t1
+ movgr2frh.w $f22, t1
+ movgr2fr.w $f23, t1
+ movgr2frh.w $f23, t1
+ movgr2fr.w $f24, t1
+ movgr2frh.w $f24, t1
+ movgr2fr.w $f25, t1
+ movgr2frh.w $f25, t1
+ movgr2fr.w $f26, t1
+ movgr2frh.w $f26, t1
+ movgr2fr.w $f27, t1
+ movgr2frh.w $f27, t1
+ movgr2fr.w $f28, t1
+ movgr2frh.w $f28, t1
+ movgr2fr.w $f29, t1
+ movgr2frh.w $f29, t1
+ movgr2fr.w $f30, t1
+ movgr2frh.w $f30, t1
+ movgr2fr.w $f31, t1
+ movgr2frh.w $f31, t1
+#else
movgr2fr.d $f0, t1
movgr2fr.d $f1, t1
movgr2fr.d $f2, t1
@@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu)
movgr2fr.d $f29, t1
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
+#endif
jr ra
SYM_FUNC_END(_init_fpu)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index e3865e92a917..aba548db2446 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
- /* Config direct window and set PG */
- SETUP_DMWINS t0
+ SETUP_TWINS
+ SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
-
- /* Enable PG */
- li.w t0, 0xb0 # PLV=0, IE=0, PG=1
- csrwr t0, LOONGARCH_CSR_CRMD
- li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
- csrwr t0, LOONGARCH_CSR_PRMD
- li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
- csrwr t0, LOONGARCH_CSR_EUEN
+ SETUP_DMWINS t0
la.pcrel t0, __bss_start # clear .bss
- st.d zero, t0, 0
+ LONG_S zero, t0, 0
la.pcrel t1, __bss_stop - LONGSIZE
1:
- addi.d t0, t0, LONGSIZE
- st.d zero, t0, 0
+ PTR_ADDI t0, t0, LONGSIZE
+ LONG_S zero, t0, 0
bne t0, t1, 1b
la.pcrel t0, fw_arg0
- st.d a0, t0, 0 # firmware arguments
+ PTR_S a0, t0, 0 # firmware arguments
la.pcrel t0, fw_arg1
- st.d a1, t0, 0
+ PTR_S a1, t0, 0
la.pcrel t0, fw_arg2
- st.d a2, t0, 0
+ PTR_S a2, t0, 0
#ifdef CONFIG_PAGE_SIZE_4KB
- li.d t0, 0
- li.d t1, CSR_STFILL
+ LONG_LI t0, 0
+ LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* KSave3 used for percpu base, initialized as 0 */
@@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
/* Jump to the new kernel: new_pc = current_pc + random_offset */
pcaddi t0, 0
- add.d t0, t0, a0
+ PTR_ADD t0, t0, a0
jirl zero, t0, 0xc
#endif /* CONFIG_RANDOMIZE_BASE */
@@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry)
*/
SYM_CODE_START(smpboot_entry)
- SETUP_DMWINS t0
+ SETUP_TWINS
+ SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
+ SETUP_DMWINS t0
#ifdef CONFIG_PAGE_SIZE_4KB
- li.d t0, 0
- li.d t1, CSR_STFILL
+ LONG_LI t0, 0
+ LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* Enable PG */
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index a43ba7f9f987..9fa1c9814fcc 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -93,6 +93,7 @@ static void count_max_entries(Elf_Rela *relas, int num,
(*plts)++;
break;
case R_LARCH_GOT_PC_HI20:
+ case R_LARCH_GOT_PCADD_HI20:
(*gots)++;
break;
default:
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index 36d6d9eeb7c7..7d4d571ee55e 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -22,72 +22,89 @@
#include <asm/inst.h>
#include <asm/unwind.h>
-static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+/*
+ * reloc_rela_handler() - Apply a particular relocation to a module
+ * @mod: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela_stack: the stack used to store relocation info, LOCAL to THIS module
+ * @rela_stack_top: where the stack operations (pop/push) apply
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ long *rela_stack, size_t *rela_stack_top, unsigned int type);
+
+static int rela_stack_push(long stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top >= RELA_STACK_DEPTH)
return -ENOEXEC;
rela_stack[(*rela_stack_top)++] = stack_value;
- pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+ pr_debug("%s stack_value = 0x%lx\n", __func__, stack_value);
return 0;
}
-static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top == 0)
return -ENOEXEC;
*stack_value = rela_stack[--(*rela_stack_top)];
- pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+ pr_debug("%s stack_value = 0x%lx\n", __func__, *stack_value);
return 0;
}
static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return 0;
}
static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
return -EINVAL;
}
static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*location = v;
return 0;
}
+#ifdef CONFIG_32BIT
+#define apply_r_larch_64 apply_r_larch_error
+#else
static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*(Elf_Addr *)location = v;
return 0;
}
+#endif
static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
- return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+ return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1;
+ long opr1;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
@@ -104,7 +121,7 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -118,10 +135,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
}
static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1, opr2, opr3;
+ long opr1, opr2, opr3;
if (type == R_LARCH_SOP_IF_ELSE) {
err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
@@ -164,10 +181,10 @@ static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
}
static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1;
+ long opr1;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
@@ -244,31 +261,33 @@ static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Ad
}
overflow:
- pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+ pr_err("module %s: opr1 = 0x%lx overflow! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
unaligned:
- pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+ pr_err("module %s: opr1 = 0x%lx unaligned! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
}
static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
switch (type) {
case R_LARCH_ADD32:
*(s32 *)location += v;
return 0;
- case R_LARCH_ADD64:
- *(s64 *)location += v;
- return 0;
case R_LARCH_SUB32:
*(s32 *)location -= v;
return 0;
+#ifdef CONFIG_64BIT
+ case R_LARCH_ADD64:
+ *(s64 *)location += v;
+ return 0;
case R_LARCH_SUB64:
*(s64 *)location -= v;
return 0;
+#endif
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
@@ -278,7 +297,7 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_b26(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
@@ -310,15 +329,40 @@ static int apply_r_larch_b26(struct module *mod,
return 0;
}
+static int apply_r_larch_pcadd(struct module *mod, u32 *location, Elf_Addr v,
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+ /* Use s32 for a sign-extension deliberately. */
+ s32 offset_hi20 = (void *)((v + 0x800)) - (void *)((Elf_Addr)location);
+
+ switch (type) {
+ case R_LARCH_PCADD_LO12:
+ insn->reg2i12_format.immediate = v & 0xfff;
+ break;
+ case R_LARCH_PCADD_HI20:
+ v = offset_hi20 >> 12;
+ insn->reg1i20_format.immediate = v & 0xfffff;
+ break;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
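
How the PCADD split above works: the HI20 part is the PC-relative offset rounded to the nearest multiple of 4096 (the +0x800 does the rounding), and the LO12 part is the signed remainder, so the pair reassembles the exact offset. A standalone worked example (values hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t offset = 0x12345;                 /* example PC-relative offset */
		int32_t hi20 = (offset + 0x800) & ~0xfff; /* rounded to nearest 4 KiB */
		int32_t lo12 = offset - hi20;             /* fits a signed 12-bit field */

		printf("hi20=%#x lo12=%d sum=%#x\n", hi20, lo12, hi20 + lo12);
		return 0;                                 /* sum == offset == 0x12345 */
	}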
static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 for a sign-extension deliberately. */
s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
(void *)((Elf_Addr)location & ~0xfff);
+#ifdef CONFIG_64BIT
Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
ptrdiff_t offset_rem = (void *)v - (void *)anchor;
+#endif
switch (type) {
case R_LARCH_PCALA_LO12:
@@ -328,6 +372,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
+#ifdef CONFIG_64BIT
case R_LARCH_PCALA64_LO20:
v = offset_rem >> 32;
insn->reg1i20_format.immediate = v & 0xfffff;
@@ -336,6 +381,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_rem >> 52;
insn->reg2i12_format.immediate = v & 0xfff;
break;
+#endif
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
@@ -346,30 +392,43 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_got_pc(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
- Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
+ reloc_rela_handler got_handler;
- if (!got)
- return -EINVAL;
+ if (type != R_LARCH_GOT_PCADD_LO12) {
+ v = module_emit_got_entry(mod, sechdrs, v);
+ if (!v)
+ return -EINVAL;
+ }
switch (type) {
case R_LARCH_GOT_PC_LO12:
type = R_LARCH_PCALA_LO12;
+ got_handler = apply_r_larch_pcala;
break;
case R_LARCH_GOT_PC_HI20:
type = R_LARCH_PCALA_HI20;
+ got_handler = apply_r_larch_pcala;
+ break;
+ case R_LARCH_GOT_PCADD_LO12:
+ type = R_LARCH_PCADD_LO12;
+ got_handler = apply_r_larch_pcadd;
+ break;
+ case R_LARCH_GOT_PCADD_HI20:
+ type = R_LARCH_PCADD_HI20;
+ got_handler = apply_r_larch_pcadd;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
- return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
+ return got_handler(mod, location, v, rela_stack, rela_stack_top, type);
}
static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -377,31 +436,22 @@ static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
return 0;
}
+#ifdef CONFIG_32BIT
+#define apply_r_larch_64_pcrel apply_r_larch_error
+#else
static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
*(u64 *)location = offset;
return 0;
}
-
-/*
- * reloc_handlers_rela() - Apply a particular relocation to a module
- * @mod: the module to apply the reloc to
- * @location: the address at which the reloc is to be applied
- * @v: the value of the reloc, with addend for RELA-style
- * @rela_stack: the stack used for store relocation info, LOCAL to THIS module
- * @rela_stac_top: where the stack operation(pop/push) applies to
- *
- * Return: 0 upon success, else -ERRNO
- */
-typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+#endif
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
- [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
+ [R_LARCH_NONE ... R_LARCH_TLS_DESC_PCADD_LO12] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32,
@@ -414,7 +464,8 @@ static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
- [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+ [R_LARCH_PCADD_HI20 ... R_LARCH_PCADD_LO12] = apply_r_larch_pcadd,
+ [R_LARCH_PCALA_HI20 ... R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
[R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
[R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
};
@@ -423,9 +474,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *mod)
{
- int i, err;
- unsigned int type;
- s64 rela_stack[RELA_STACK_DEPTH];
+ int err;
+ unsigned int i, idx, type;
+ unsigned int num_relocations;
+ long rela_stack[RELA_STACK_DEPTH];
size_t rela_stack_top = 0;
reloc_rela_handler handler;
void *location;
@@ -436,8 +488,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
sechdrs[relsec].sh_info);
+ idx = 0;
rela_stack_top = 0;
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
+ for (i = 0; i < num_relocations; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
/* This is the symbol it is referring to */
@@ -462,17 +516,59 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return -EINVAL;
}
- pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+ pr_debug("type %d st_value %lx r_addend %lx loc %lx\n",
(int)ELF_R_TYPE(rel[i].r_info),
- sym->st_value, rel[i].r_addend, (u64)location);
+ (unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (unsigned long)location);
v = sym->st_value + rel[i].r_addend;
+
+ if (type == R_LARCH_PCADD_LO12 || type == R_LARCH_GOT_PCADD_LO12) {
+ bool found = false;
+ unsigned int j = idx;
+
+ do {
+ u32 hi20_type = ELF_R_TYPE(rel[j].r_info);
+ unsigned long hi20_location =
+ sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset;
+
+ /* Find the corresponding HI20 relocation entry */
+ if ((hi20_location == sym->st_value) && (hi20_type == type - 1)) {
+ s32 hi20, lo12;
+ Elf_Sym *hi20_sym =
+ (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[j].r_info);
+ unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend;
+
+ /* Calculate LO12 offset */
+ size_t offset = hi20_sym_val - hi20_location;
+ if (hi20_type == R_LARCH_GOT_PCADD_HI20) {
+ offset = module_emit_got_entry(mod, sechdrs, hi20_sym_val);
+ offset = offset - hi20_location;
+ }
+ hi20 = (offset + 0x800) & 0xfffff000;
+ v = lo12 = offset - hi20;
+ found = true;
+ break;
+ }
+
+ j = (j + 1) % num_relocations;
+
+ } while (idx != j);
+
+ if (!found) {
+ pr_err("%s: Can not find HI20 relocation information\n", mod->name);
+ return -EINVAL;
+ }
+
+ idx = j; /* Resume the next search from where this one ended */
+ }
+
switch (type) {
case R_LARCH_B26:
err = apply_r_larch_b26(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12:
+ case R_LARCH_GOT_PCADD_HI20...R_LARCH_GOT_PCADD_LO12:
err = apply_r_larch_got_pc(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
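
The LO12 pass above has to locate its partner HI20 entry somewhere in the same relocation section. Rather than rescanning from the start each time, it resumes from the previous match (idx) and wraps around at most once. A sketch of just that scan pattern, with match() standing in for the HI20 test (assumes n > 0):

	#include <stdbool.h>
	#include <stddef.h>

	static bool scan_from(size_t *idx, size_t n, bool (*match)(size_t))
	{
		size_t j = *idx;

		do {
			if (match(j)) {
				*idx = j;   /* the next search resumes here */
				return true;
			}
			j = (j + 1) % n;
		} while (j != *idx);

		return false;               /* wrapped all the way around: no partner */
	}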
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 63d2b7e7e844..a8800d20e11b 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned int prid = cpu_data[n].processor_id;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
+ u64 freq = cpu_clock_freq, bogomips = lpj_fine * cpu_clock_freq;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
+ do_div(freq, 10000);
+ do_div(bogomips, const_clock_freq * (5000/HZ));
/*
* For the first processor also print the system type
@@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
- seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
- cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
- seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
- (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
- ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+ seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 100);
+ seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomips % 100);
seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
cpu_pabits + 1, cpu_vabits + 1);
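
The rewrite funnels all 64-bit division through do_div() because a plain '/' on a u64 would make 32-bit builds call libgcc's __udivdi3, which the kernel does not link against. A portable stand-in showing the contract (divide in place, return the remainder) and the CPU MHz computation above:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t do_div_demo(uint64_t *n, uint32_t base)
	{
		uint32_t rem = (uint32_t)(*n % base);

		*n /= base;
		return rem;
	}

	int main(void)
	{
		uint64_t freq = 2500000000ULL;  /* 2.5 GHz, hypothetical */

		do_div_demo(&freq, 10000);      /* freq == 250000 */
		printf("CPU MHz: %u.%02u\n", (uint32_t)freq / 100, (uint32_t)freq % 100);
		return 0;                       /* prints "CPU MHz: 2500.00" */
	}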
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index efd9edf65603..4ac1c3086152 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -130,6 +130,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
preempt_enable();
+ if (IS_ENABLED(CONFIG_RANDSTRUCT)) {
+ memcpy(dst, src, sizeof(struct task_struct));
+ return 0;
+ }
+
if (!used_math())
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
else
@@ -377,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs)
+#else
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+#endif
{
unsigned int i;
@@ -395,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
-#endif /* CONFIG_64BIT */
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 8edd0954e55a..be38430f7e28 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
+#ifdef CONFIG_32BIT
+ if ((unsigned long)addr >= KPRANGE0)
+ return -EINVAL;
+#else
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
+#endif
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index b5e2312a2fca..82aa3f035927 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random_offset)
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
- uint32_t lu12iw, ori, lu32id, lu52id;
+ uint32_t lu12iw, ori;
+#ifdef CONFIG_64BIT
+ uint32_t lu32id, lu52id;
+#endif
union loongarch_instruction *insn = (void *)p->pc;
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
+#ifdef CONFIG_64BIT
lu32id = (v >> 32) & 0xfffff;
lu52id = v >> 52;
+#endif
insn[0].reg1i20_format.immediate = lu12iw;
insn[1].reg2i12_format.immediate = ori;
+#ifdef CONFIG_64BIT
insn[2].reg1i20_format.immediate = lu32id;
insn[3].reg2i12_format.immediate = lu52id;
+#endif
}
}
@@ -183,7 +190,7 @@ static inline void __init *determine_relocation_address(void)
if (kaslr_disabled())
return destination;
- kernel_length = (long)_end - (long)_text;
+ kernel_length = (unsigned long)_end - (unsigned long)_text;
random_offset = get_random_boot() << 16;
random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
@@ -232,7 +239,7 @@ unsigned long __init relocate_kernel(void)
early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
- kernel_length = (long)(_end) - (long)(_text);
+ kernel_length = (unsigned long)(_end) - (unsigned long)(_text);
/* Copy the kernel to it's new location */
memcpy(location_new, _text, kernel_length);
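
relocate_absolute() patches an absolute address into a lu12i.w/ori pair, plus lu32i.d/lu52i.d on 64-bit, so the 32-bit build only needs the first two fields. A bit-reassembly sketch showing the four fields recombine to the original value (address hypothetical; real hardware also sign-extends lu12i.w, which the raw field merge below sidesteps):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t v = 0x9000000001234567ULL;       /* hypothetical address */
		uint32_t lu12iw = (v >> 12) & 0xfffff;    /* bits 31:12 */
		uint32_t ori    = v & 0xfff;              /* bits 11:0  */
		uint32_t lu32id = (v >> 32) & 0xfffff;    /* bits 51:32 */
		uint32_t lu52id = v >> 52;                /* bits 63:52 */
		uint64_t rebuilt = ((uint64_t)lu52id << 52) | ((uint64_t)lu32id << 32) |
				   ((uint64_t)lu12iw << 12) | ori;

		printf("%s\n", rebuilt == v ? "match" : "mismatch");  /* match */
		return 0;
	}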
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 25a87378e48e..20cb6f306456 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -56,6 +56,7 @@
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
+#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E
#define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
@@ -126,7 +127,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
- loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
+ loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
+ if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) {
+ /* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */
+ loongson_sysconf.cores_per_package =
+ *(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET);
+ }
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}
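
The SMBIOS Type 4 ThreadCount byte at offset 0x25 saturates at 0xFF; SMBIOS 3.0 added a 16-bit ThreadCount2 at offset 0x2E for larger packages, valid only when the record is long enough. A sketch of that rule over a raw record (buffer access hypothetical; offsets per the SMBIOS spec):

	#include <stddef.h>
	#include <stdint.h>

	static unsigned int smbios_thread_count(const uint8_t *rec, size_t len)
	{
		unsigned int n = rec[0x25];               /* ThreadCount */

		if (n == 0xff && len >= 0x30)             /* saturated: use ThreadCount2 */
			n = rec[0x2e] | (rec[0x2f] << 8); /* little-endian u16 */

		return n;
	}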
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 9c23cb7e432f..f377d8f5c51a 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -16,18 +16,23 @@
*/
.align 5
SYM_FUNC_START(__switch_to)
- csrrd t1, LOONGARCH_CSR_PRMD
- stptr.d t1, a0, THREAD_CSRPRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, TASK_STRUCT_OFFSET
+ PTR_ADDI a1, a1, TASK_STRUCT_OFFSET
+#endif
+ csrrd t1, LOONGARCH_CSR_PRMD
+ LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
cpu_save_nonscratch a0
- stptr.d ra, a0, THREAD_REG01
- stptr.d a3, a0, THREAD_SCHED_RA
- stptr.d a4, a0, THREAD_SCHED_CFA
+ LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET)
+ LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET)
+
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- la t7, __stack_chk_guard
- LONG_L t8, a1, TASK_STACK_CANARY
- LONG_S t8, t7, 0
+ la t7, __stack_chk_guard
+ LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET)
+ LONG_SPTR t8, t7, 0
#endif
+
move tp, a2
cpu_restore_nonscratch a1
@@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to)
PTR_ADD t0, t0, tp
set_saved_sp t0, t1, t2
- ldptr.d t1, a1, THREAD_CSRPRMD
- csrwr t1, LOONGARCH_CSR_PRMD
+ LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
+ csrwr t1, LOONGARCH_CSR_PRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET
+#endif
jr ra
SYM_FUNC_END(__switch_to)
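
A hedged reading of the TASK_STRUCT_OFFSET dance above: the 32-bit load/store forms reached through LONG_SPTR/LONG_LPTR appear to take only a short signed displacement, so biasing a0/a1 by TASK_STRUCT_OFFSET and subtracting it from every THREAD_* offset keeps each displacement in range. Illustrative arithmetic only (both constants hypothetical):

	#include <stdio.h>

	#define SI12_MAX 2047                 /* largest signed 12-bit displacement */
	#define TASK_STRUCT_OFFSET 2048       /* hypothetical bias */
	#define THREAD_CSRPRMD 3000           /* hypothetical large struct offset */

	int main(void)
	{
		int disp = THREAD_CSRPRMD - TASK_STRUCT_OFFSET;

		printf("disp=%d in_range=%d\n", disp, disp >= -2048 && disp <= SI12_MAX);
		return 0;                     /* disp=952 in_range=1 */
	}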
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 168bd97540f8..1249d82c1cd0 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long,
+ prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
+{
+ if (offset & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - 12));
+}
+
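
mmap2 takes its offset in fixed 4096-byte units regardless of the kernel page size, which is why the guard masks with ~PAGE_MASK >> 12 and the final shift is PAGE_SHIFT - 12. A worked example assuming 16 KiB pages (PAGE_SHIFT == 14):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 14
	#define PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))

	int main(void)
	{
		unsigned long offset = 7;     /* 7 * 4096 bytes: not 16 KiB aligned */

		if (offset & (~PAGE_MASK >> 12))
			puts("rejected: not a whole number of pages");
		else
			printf("pgoff = %lu\n", offset >> (PAGE_SHIFT - 12));
		return 0;                     /* offset 8 would give pgoff = 2 */
	}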
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#ifdef CONFIG_32BIT
+#include <asm/syscall_table_32.h>
+#else
#include <asm/syscall_table_64.h>
+#endif
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
@@ -75,7 +88,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
*
* The resulting 6 bits of entropy is seen in SP[9:4].
*/
- choose_random_kstack_offset(drdtime());
+ choose_random_kstack_offset(get_cycles());
syscall_exit_to_user_mode(regs);
}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 6fb92cc1a4c9..dbaaabcaf6f0 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -18,6 +18,7 @@
#include <asm/loongarch.h>
#include <asm/paravirt.h>
#include <asm/time.h>
+#include <asm/timex.h>
u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
@@ -50,10 +51,10 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config |= CSR_TCFG_EN;
timer_config &= ~CSR_TCFG_PERIOD;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -62,15 +63,15 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
static int constant_set_state_periodic(struct clock_event_device *evt)
{
- unsigned long period;
unsigned long timer_config;
+ u64 period = const_clock_freq;
raw_spin_lock(&state_lock);
- period = const_clock_freq / HZ;
+ do_div(period, HZ);
timer_config = period & CSR_TCFG_VAL;
timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -83,9 +84,9 @@ static int constant_set_state_shutdown(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -98,7 +99,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
delta &= CSR_TCFG_VAL;
timer_config = delta | CSR_TCFG_EN;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
return 0;
}
@@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu)
static unsigned long get_loops_per_jiffy(void)
{
- unsigned long lpj = (unsigned long)const_clock_freq;
+ u64 lpj = const_clock_freq;
do_div(lpj, HZ);
@@ -131,13 +132,13 @@ static long init_offset;
void save_counter(void)
{
- init_offset = drdtime();
+ init_offset = get_cycles();
}
void sync_counter(void)
{
/* Ensure counter begin at 0 */
- csr_write64(init_offset, LOONGARCH_CSR_CNTC);
+ csr_write(init_offset, LOONGARCH_CSR_CNTC);
}
int constant_clockevent_init(void)
@@ -197,12 +198,12 @@ int constant_clockevent_init(void)
static u64 read_const_counter(struct clocksource *clk)
{
- return drdtime();
+ return get_cycles64();
}
static noinstr u64 sched_clock_read(void)
{
- return drdtime();
+ return get_cycles64();
}
static struct clocksource clocksource_const = {
@@ -211,7 +212,9 @@ static struct clocksource clocksource_const = {
.read = read_const_counter,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+#endif
};
int __init constant_clocksource_init(void)
@@ -235,7 +238,7 @@ void __init time_init(void)
else
const_clock_freq = calc_const_freq();
- init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
+ init_offset = -(get_cycles() - csr_read(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index da5926fead4a..004b8ebf0051 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -625,7 +625,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
- u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ unsigned long badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
@@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs *regs)
asmlinkage void cache_parity_error(void)
{
+ u32 merrctl = csr_read32(LOONGARCH_CSR_MERRCTL);
+ unsigned long merrera = csr_read(LOONGARCH_CSR_MERRERA);
+
/* For the moment, report the problem and hang. */
pr_err("Cache error exception:\n");
- pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
- pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+ pr_err("csr_merrctl == %08x\n", merrctl);
+ pr_err("csr_merrera == %016lx\n", merrera);
panic("Can't handle the cache error!");
}
@@ -1130,9 +1133,9 @@ static void configure_exception_vector(void)
eentry = (unsigned long)exception_handlers;
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
- csr_write64(eentry, LOONGARCH_CSR_EENTRY);
- csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
- csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
+ csr_write(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
+ csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
}
void per_cpu_trap_init(int cpu)
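
The csr_read()/csr_write() spellings used throughout this series presumably dispatch to the 32- or 64-bit CSR accessors by word size; their real definitions are not in this hunk. A plausible shape, stated as an assumption:

	/* Hypothetical sketch: not the definition from this series. */
	#if BITS_PER_LONG == 64
	#define csr_read(reg)		csr_read64(reg)
	#define csr_write(val, reg)	csr_write64(val, reg)
	#else
	#define csr_read(reg)		csr_read32(reg)
	#define csr_write(val, reg)	csr_write32(val, reg)
	#endif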
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
index 487be604b96a..cc929c9fe7e9 100644
--- a/arch/loongarch/kernel/unaligned.c
+++ b/arch/loongarch/kernel/unaligned.c
@@ -27,12 +27,21 @@ static u32 unaligned_instructions_user;
static u32 unaligned_instructions_kernel;
#endif
-static inline unsigned long read_fpr(unsigned int idx)
+static inline u64 read_fpr(unsigned int idx)
{
+#ifdef CONFIG_64BIT
#define READ_FPR(idx, __value) \
__asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
-
- unsigned long __value;
+#else
+#define READ_FPR(idx, __value) \
+{ \
+ u32 __value_lo, __value_hi; \
+ __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \
+ __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \
+ __value = (__value_lo | ((u64)__value_hi << 32)); \
+}
+#endif
+ u64 __value;
switch (idx) {
case 0:
@@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx)
return __value;
}
-static inline void write_fpr(unsigned int idx, unsigned long value)
+static inline void write_fpr(unsigned int idx, u64 value)
{
+#ifdef CONFIG_64BIT
#define WRITE_FPR(idx, value) \
__asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
-
+#else
+#define WRITE_FPR(idx, value) \
+{ \
+ u32 value_lo = value; \
+ u32 value_hi = value >> 32; \
+ __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \
+ __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \
+}
+#endif
switch (idx) {
case 0:
WRITE_FPR(0, value);
@@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned i
bool sign, write;
bool user = user_mode(regs);
unsigned int res, size = 0;
- unsigned long value = 0;
+ u64 value = 0;
union loongarch_instruction insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
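
The 32-bit WRITE_FPR path splits the 64-bit value into two word moves (movgr2fr.w / movgr2frh.w), mirroring the two-read join in READ_FPR. A round-trip sketch of the split-and-rejoin in plain C:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t value = 0x0123456789abcdefULL;  /* hypothetical FPR image */
		uint32_t lo = (uint32_t)value;           /* low word  (movgr2fr.w)  */
		uint32_t hi = (uint32_t)(value >> 32);   /* high word (movgr2frh.w) */
		uint64_t back = lo | ((uint64_t)hi << 32);

		printf("%s\n", back == value ? "round-trip ok" : "bug");
		return 0;
	}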
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 6d833599ef2e..656b954c1134 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -9,6 +9,7 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
+#include <asm/timex.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -814,7 +815,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
- *v = drdtime() + vcpu->kvm->arch.time_offset;
+ *v = get_cycles() + vcpu->kvm->arch.time_offset;
break;
case KVM_REG_LOONGARCH_DEBUG_INST:
*v = INSN_HVCL | KVM_HCALL_SWDBG;
@@ -909,7 +910,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
* only set for the first time for smp system
*/
if (vcpu->vcpu_id == 0)
- vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
+ vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
vcpu->arch.st.guest_addr = 0;
diff --git a/arch/loongarch/lib/bswapdi.c b/arch/loongarch/lib/bswapdi.c
new file mode 100644
index 000000000000..88242dc7de17
--- /dev/null
+++ b/arch/loongarch/lib/bswapdi.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned long long __bswapdi2(unsigned long long u);
+
+unsigned long long notrace __bswapdi2(unsigned long long u)
+{
+ return ___constant_swab64(u);
+}
+EXPORT_SYMBOL(__bswapdi2);
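
Why the kernel must carry __bswapdi2(): on a 32-bit target the compiler may lower a 64-bit byte swap to a libgcc call, and the kernel links without libgcc. A use that can trigger it (whether a call or inline code is emitted is the compiler's choice):

	#include <stdint.h>

	uint64_t wire_to_host64(uint64_t be)
	{
		return __builtin_bswap64(be);   /* may compile to a __bswapdi2() call */
	}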
diff --git a/arch/loongarch/lib/bswapsi.c b/arch/loongarch/lib/bswapsi.c
new file mode 100644
index 000000000000..2ed655497de5
--- /dev/null
+++ b/arch/loongarch/lib/bswapsi.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned int __bswapsi2(unsigned int u);
+
+unsigned int notrace __bswapsi2(unsigned int u)
+{
+ return ___constant_swab32(u);
+}
+EXPORT_SYMBOL(__bswapsi2);
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 7a0db643b286..58c667dde882 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
+#ifdef CONFIG_32BIT
+ b __clear_user_generic
+#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __clear_user_generic", \
"b __clear_user_fast", CPU_FEATURE_UAL
+#endif
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
@@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user)
* a1: size
*/
SYM_FUNC_START(__clear_user_generic)
- beqz a1, 2f
+ beqz a1, 2f
-1: st.b zero, a0, 0
- addi.d a0, a0, 1
- addi.d a1, a1, -1
- bgtz a1, 1b
+1: st.b zero, a0, 0
+ PTR_ADDI a0, a0, 1
+ PTR_ADDI a1, a1, -1
+ bgtz a1, 1b
-2: move a0, a1
- jr ra
+2: move a0, a1
+ jr ra
- _asm_extable 1b, 2b
+ _asm_extable 1b, 2b
SYM_FUNC_END(__clear_user_generic)
+#ifdef CONFIG_64BIT
/*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
@@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast)
SYM_FUNC_END(__clear_user_fast)
STACK_FRAME_NON_STANDARD __clear_user_fast
+#endif
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index 095ce9181c6c..c7264b779f6e 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
+#ifdef CONFIG_32BIT
+ b __copy_user_generic
+#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __copy_user_generic", \
"b __copy_user_fast", CPU_FEATURE_UAL
+#endif
SYM_FUNC_END(__copy_user)
EXPORT_SYMBOL(__copy_user)
@@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user)
* a2: n
*/
SYM_FUNC_START(__copy_user_generic)
- beqz a2, 3f
+ beqz a2, 3f
-1: ld.b t0, a1, 0
-2: st.b t0, a0, 0
- addi.d a0, a0, 1
- addi.d a1, a1, 1
- addi.d a2, a2, -1
- bgtz a2, 1b
+1: ld.b t0, a1, 0
+2: st.b t0, a0, 0
+ PTR_ADDI a0, a0, 1
+ PTR_ADDI a1, a1, 1
+ PTR_ADDI a2, a2, -1
+ bgtz a2, 1b
-3: move a0, a2
- jr ra
+3: move a0, a2
+ jr ra
- _asm_extable 1b, 3b
- _asm_extable 2b, 3b
+ _asm_extable 1b, 3b
+ _asm_extable 2b, 3b
SYM_FUNC_END(__copy_user_generic)
+#ifdef CONFIG_64BIT
/*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
@@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast)
SYM_FUNC_END(__copy_user_fast)
STACK_FRAME_NON_STANDARD __copy_user_fast
+#endif
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
index 0b886a6e260f..e1cdad7a676e 100644
--- a/arch/loongarch/lib/dump_tlb.c
+++ b/arch/loongarch/lib/dump_tlb.c
@@ -20,9 +20,9 @@ void dump_tlb_regs(void)
pr_info("Index : 0x%0x\n", read_csr_tlbidx());
pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
- pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi());
- pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0());
- pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1());
+ pr_info("EntryHi : 0x%0*lx\n", field, (unsigned long)read_csr_entryhi());
+ pr_info("EntryLo0 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo0());
+ pr_info("EntryLo1 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
@@ -73,12 +73,16 @@ static void dump_tlb(int first, int last)
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
+#ifdef CONFIG_64BIT
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
+#else
+ pa = entrylo0;
+#endif
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
+#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
+#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
@@ -86,11 +90,15 @@ static void dump_tlb(int first, int last)
(entrylo0 & ENTRYLO_G) ? 1 : 0,
(entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
/* NR/NX are in awkward places, so mask them off separately */
+#ifdef CONFIG_64BIT
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
+#else
+ pa = entrylo1;
+#endif
pa = pa & PAGE_MASK;
+#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
+#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S
index 185f82d85810..470c0bfa3463 100644
--- a/arch/loongarch/lib/unaligned.S
+++ b/arch/loongarch/lib/unaligned.S
@@ -24,35 +24,35 @@
* a3: sign
*/
SYM_FUNC_START(unaligned_read)
- beqz a2, 5f
+ beqz a2, 5f
- li.w t2, 0
- addi.d t0, a2, -1
- slli.d t1, t0, 3
- add.d a0, a0, t0
+ li.w t2, 0
+ LONG_ADDI t0, a2, -1
+ PTR_SLLI t1, t0, LONGLOG
+ PTR_ADD a0, a0, t0
- beqz a3, 2f
-1: ld.b t3, a0, 0
- b 3f
+ beqz a3, 2f
+1: ld.b t3, a0, 0
+ b 3f
-2: ld.bu t3, a0, 0
-3: sll.d t3, t3, t1
- or t2, t2, t3
- addi.d t1, t1, -8
- addi.d a0, a0, -1
- addi.d a2, a2, -1
- bgtz a2, 2b
-4: st.d t2, a1, 0
+2: ld.bu t3, a0, 0
+3: LONG_SLLV t3, t3, t1
+ or t2, t2, t3
+ LONG_ADDI t1, t1, -8
+ PTR_ADDI a0, a0, -1
+ PTR_ADDI a2, a2, -1
+ bgtz a2, 2b
+4: LONG_S t2, a1, 0
- move a0, a2
- jr ra
+ move a0, a2
+ jr ra
-5: li.w a0, -EFAULT
- jr ra
+5: li.w a0, -EFAULT
+ jr ra
- _asm_extable 1b, .L_fixup_handle_unaligned
- _asm_extable 2b, .L_fixup_handle_unaligned
- _asm_extable 4b, .L_fixup_handle_unaligned
+ _asm_extable 1b, .L_fixup_handle_unaligned
+ _asm_extable 2b, .L_fixup_handle_unaligned
+ _asm_extable 4b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_read)
/*
@@ -63,21 +63,21 @@ SYM_FUNC_END(unaligned_read)
* a2: n
*/
SYM_FUNC_START(unaligned_write)
- beqz a2, 3f
+ beqz a2, 3f
- li.w t0, 0
-1: srl.d t1, a1, t0
-2: st.b t1, a0, 0
- addi.d t0, t0, 8
- addi.d a2, a2, -1
- addi.d a0, a0, 1
- bgtz a2, 1b
+ li.w t0, 0
+1: LONG_SRLV t1, a1, t0
+2: st.b t1, a0, 0
+ LONG_ADDI t0, t0, 8
+ PTR_ADDI a2, a2, -1
+ PTR_ADDI a0, a0, 1
+ bgtz a2, 1b
- move a0, a2
- jr ra
+ move a0, a2
+ jr ra
-3: li.w a0, -EFAULT
- jr ra
+3: li.w a0, -EFAULT
+ jr ra
- _asm_extable 2b, .L_fixup_handle_unaligned
+ _asm_extable 2b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_write)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 6bfd4b8dad1b..0946662afdd6 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
-#ifdef CONFIG_EXECMEM
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
static struct execmem_info execmem_info __ro_after_init;
struct execmem_info __init *execmem_arch_setup(void)
@@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void)
return &execmem_info;
}
-#endif /* CONFIG_EXECMEM */
+#endif /* CONFIG_EXECMEM && MODULES_VADDR */
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
index 7ad76551d313..7286b804756d 100644
--- a/arch/loongarch/mm/page.S
+++ b/arch/loongarch/mm/page.S
@@ -10,75 +10,75 @@
.align 5
SYM_FUNC_START(clear_page)
- lu12i.w t0, 1 << (PAGE_SHIFT - 12)
- add.d t0, t0, a0
+ lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t0, t0, a0
1:
- st.d zero, a0, 0
- st.d zero, a0, 8
- st.d zero, a0, 16
- st.d zero, a0, 24
- st.d zero, a0, 32
- st.d zero, a0, 40
- st.d zero, a0, 48
- st.d zero, a0, 56
- addi.d a0, a0, 128
- st.d zero, a0, -64
- st.d zero, a0, -56
- st.d zero, a0, -48
- st.d zero, a0, -40
- st.d zero, a0, -32
- st.d zero, a0, -24
- st.d zero, a0, -16
- st.d zero, a0, -8
- bne t0, a0, 1b
+ LONG_S zero, a0, (LONGSIZE * 0)
+ LONG_S zero, a0, (LONGSIZE * 1)
+ LONG_S zero, a0, (LONGSIZE * 2)
+ LONG_S zero, a0, (LONGSIZE * 3)
+ LONG_S zero, a0, (LONGSIZE * 4)
+ LONG_S zero, a0, (LONGSIZE * 5)
+ LONG_S zero, a0, (LONGSIZE * 6)
+ LONG_S zero, a0, (LONGSIZE * 7)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ LONG_S zero, a0, -(LONGSIZE * 8)
+ LONG_S zero, a0, -(LONGSIZE * 7)
+ LONG_S zero, a0, -(LONGSIZE * 6)
+ LONG_S zero, a0, -(LONGSIZE * 5)
+ LONG_S zero, a0, -(LONGSIZE * 4)
+ LONG_S zero, a0, -(LONGSIZE * 3)
+ LONG_S zero, a0, -(LONGSIZE * 2)
+ LONG_S zero, a0, -(LONGSIZE * 1)
+ bne t0, a0, 1b
- jr ra
+ jr ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
.align 5
SYM_FUNC_START(copy_page)
- lu12i.w t8, 1 << (PAGE_SHIFT - 12)
- add.d t8, t8, a0
+ lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t8, t8, a0
1:
- ld.d t0, a1, 0
- ld.d t1, a1, 8
- ld.d t2, a1, 16
- ld.d t3, a1, 24
- ld.d t4, a1, 32
- ld.d t5, a1, 40
- ld.d t6, a1, 48
- ld.d t7, a1, 56
+ LONG_L t0, a1, (LONGSIZE * 0)
+ LONG_L t1, a1, (LONGSIZE * 1)
+ LONG_L t2, a1, (LONGSIZE * 2)
+ LONG_L t3, a1, (LONGSIZE * 3)
+ LONG_L t4, a1, (LONGSIZE * 4)
+ LONG_L t5, a1, (LONGSIZE * 5)
+ LONG_L t6, a1, (LONGSIZE * 6)
+ LONG_L t7, a1, (LONGSIZE * 7)
- st.d t0, a0, 0
- st.d t1, a0, 8
- ld.d t0, a1, 64
- ld.d t1, a1, 72
- st.d t2, a0, 16
- st.d t3, a0, 24
- ld.d t2, a1, 80
- ld.d t3, a1, 88
- st.d t4, a0, 32
- st.d t5, a0, 40
- ld.d t4, a1, 96
- ld.d t5, a1, 104
- st.d t6, a0, 48
- st.d t7, a0, 56
- ld.d t6, a1, 112
- ld.d t7, a1, 120
- addi.d a0, a0, 128
- addi.d a1, a1, 128
+ LONG_S t0, a0, (LONGSIZE * 0)
+ LONG_S t1, a0, (LONGSIZE * 1)
+ LONG_L t0, a1, (LONGSIZE * 8)
+ LONG_L t1, a1, (LONGSIZE * 9)
+ LONG_S t2, a0, (LONGSIZE * 2)
+ LONG_S t3, a0, (LONGSIZE * 3)
+ LONG_L t2, a1, (LONGSIZE * 10)
+ LONG_L t3, a1, (LONGSIZE * 11)
+ LONG_S t4, a0, (LONGSIZE * 4)
+ LONG_S t5, a0, (LONGSIZE * 5)
+ LONG_L t4, a1, (LONGSIZE * 12)
+ LONG_L t5, a1, (LONGSIZE * 13)
+ LONG_S t6, a0, (LONGSIZE * 6)
+ LONG_S t7, a0, (LONGSIZE * 7)
+ LONG_L t6, a1, (LONGSIZE * 14)
+ LONG_L t7, a1, (LONGSIZE * 15)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ PTR_ADDI a1, a1, (LONGSIZE * 16)
- st.d t0, a0, -64
- st.d t1, a0, -56
- st.d t2, a0, -48
- st.d t3, a0, -40
- st.d t4, a0, -32
- st.d t5, a0, -24
- st.d t6, a0, -16
- st.d t7, a0, -8
+ LONG_S t0, a0, -(LONGSIZE * 8)
+ LONG_S t1, a0, -(LONGSIZE * 7)
+ LONG_S t2, a0, -(LONGSIZE * 6)
+ LONG_S t3, a0, -(LONGSIZE * 5)
+ LONG_S t4, a0, -(LONGSIZE * 4)
+ LONG_S t5, a0, -(LONGSIZE * 3)
+ LONG_S t6, a0, -(LONGSIZE * 2)
+ LONG_S t7, a0, -(LONGSIZE * 1)
- bne t8, a0, 1b
- jr ra
+ bne t8, a0, 1b
+ jr ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 3b427b319db2..6a3c91b9cacd 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -229,11 +229,11 @@ static void setup_ptwalker(void)
if (cpu_has_ptw)
pwctl1 |= CSR_PWCTL1_PTW;
- csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
- csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
- csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
- csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
- csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
+ csr_write(pwctl0, LOONGARCH_CSR_PWCTL0);
+ csr_write(pwctl1, LOONGARCH_CSR_PWCTL1);
+ csr_write((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
+ csr_write((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ csr_write((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}
static void output_pgtable_bits_defines(void)
@@ -251,8 +251,10 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+#ifdef CONFIG_64BIT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index c08682a89c58..84a881a339a7 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -11,10 +11,18 @@
#define INVTLB_ADDR_GFALSE_AND_ASID 5
-#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG)
+
+#ifdef CONFIG_32BIT
+#define PTE_LL ll.w
+#define PTE_SC sc.w
+#else
+#define PTE_LL ll.d
+#define PTE_SC sc.d
+#endif
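
PTE_LL/PTE_SC size the load-linked/store-conditional pair to the PTE width. The handlers' update loop is the classic LL/SC retry; in C atomics terms (a sketch of the pattern, not the handler itself):

	#include <stdatomic.h>

	static void pte_set_bits(_Atomic unsigned long *pte, unsigned long bits)
	{
		unsigned long old = atomic_load(pte);

		/* A failed store-conditional branches back to the ll;
		 * CAS-weak failure plays the same role here. */
		while (!atomic_compare_exchange_weak(pte, &old, old | bits))
			;
	}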
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
@@ -60,52 +68,61 @@ SYM_CODE_START(handle_tlb_load)
vmalloc_done_load:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_load
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_load:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT
beqz ra, nopage_tlb_load
ori t0, t0, _PAGE_VALID
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_load
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
+
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -115,30 +132,28 @@ smp_pgtable_change_load:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_load:
la_abs t1, swapper_pg_dir
b vmalloc_done_load
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
ori t0, ra, _PAGE_VALID
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -158,27 +173,27 @@ tlb_huge_update_load:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -216,53 +231,71 @@ SYM_CODE_START(handle_tlb_store)
vmalloc_done_store:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_store
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE
+	andn	ra, ra, t0
+#endif
bnez ra, nopage_tlb_store
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_store
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -272,31 +305,42 @@ smp_pgtable_change_store:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_store:
la_abs t1, swapper_pg_dir
b vmalloc_done_store
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE
+	andn	t0, t0, ra
+#endif
+
bnez t0, nopage_tlb_store
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -316,28 +360,28 @@ tlb_huge_update_store:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -375,52 +419,69 @@ SYM_CODE_START(handle_tlb_modify)
vmalloc_done_modify:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_modify
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_WRITE
+ and ra, t0, ra
+#endif
+
beqz ra, nopage_tlb_modify
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_modify
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -430,30 +491,40 @@ smp_pgtable_change_modify:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_modify:
la_abs t1, swapper_pg_dir
b vmalloc_done_modify
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_WRITE
+ and t0, ra, t0
+#endif
+
beqz t0, nopage_tlb_modify
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -473,28 +544,28 @@ tlb_huge_update_modify:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -517,6 +588,44 @@ SYM_CODE_START(handle_tlb_modify_ptw)
jr t0
SYM_CODE_END(handle_tlb_modify_ptw)
+#ifdef CONFIG_32BIT
+SYM_CODE_START(handle_tlb_refill)
+ UNWIND_HINT_UNDEFINED
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+ li.w ra, 0x1fffffff
+
+ csrrd t0, LOONGARCH_CSR_PGD
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ srli.w t1, t1, PGDIR_SHIFT
+ slli.w t1, t1, 0x2
+ add.w t0, t0, t1
+ and t0, t0, ra
+
+ ld.w t0, t0, 0
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ slli.w t1, t1, (32 - PGDIR_SHIFT)
+ srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)
+ slli.w t1, t1, (0x2 + 1)
+ add.w t0, t0, t1
+ and t0, t0, ra
+
+ ld.w t1, t0, 0x0
+ csrwr t1, LOONGARCH_CSR_TLBRELO0
+
+ ld.w t1, t0, 0x4
+ csrwr t1, LOONGARCH_CSR_TLBRELO1
+
+ tlbfill
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+SYM_CODE_END(handle_tlb_refill)
+#endif
+
+#ifdef CONFIG_64BIT
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
@@ -534,3 +643,4 @@ SYM_CODE_START(handle_tlb_refill)
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
SYM_CODE_END(handle_tlb_refill)
+#endif
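
The whole tlbex.S conversion above follows one pattern: fixed-width `.d` instructions and hard-coded constants (offset 8, shift 3, rotate width 64) become width-neutral macros and symbolic sizes (PTR_L/PTR_S, PTE_LL/PTE_SC, _PTE_T_LOG2/_PTE_T_SIZE, BITS_PER_LONG), so the same handler source assembles for both LA32 and LA64; the new CONFIG_32BIT handle_tlb_refill then walks the two-level page table by hand. A minimal sketch of how such width-neutral aliases could be defined — illustrative only, the in-tree definitions live in the arch asm headers and may differ in detail:

#ifdef CONFIG_64BIT
#define PTR_L		ld.d		/* load a pointer-sized word */
#define PTR_S		st.d		/* store a pointer-sized word */
#define PTE_LL		ll.d		/* LL/SC pair for atomic PTE updates */
#define PTE_SC		sc.d
#define PTR_ROTRI	rotri.d
#define PTR_BSTRPICK	bstrpick.d
#define _PTE_T_SIZE	8		/* sizeof(pte_t) */
#else
#define PTR_L		ld.w
#define PTR_S		st.w
#define PTE_LL		ll.w
#define PTE_SC		sc.w
#define PTR_ROTRI	rotri.w
#define PTR_BSTRPICK	bstrpick.w
#define _PTE_T_SIZE	4
#endif

With aliases of this shape, `PTR_L t0, t1, 0` picks the right load for the build, and `PTR_L t1, t1, _PTE_T_SIZE` replaces the hard-coded odd-entry offset 8 when loading the TLBELO pair.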
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
index d9fc5d520b37..d923295ab8c6 100644
--- a/arch/loongarch/pci/pci.c
+++ b/arch/loongarch/pci/pci.c
@@ -14,6 +14,7 @@
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
+#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
@@ -97,3 +98,4 @@ static void pci_fixup_vgadev(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev);
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index e7b7346592cb..817270410ef9 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -10,7 +10,7 @@ static u32 saved_crmd;
static u32 saved_prmd;
static u32 saved_euen;
static u32 saved_ecfg;
-static u64 saved_pcpu_base;
+static unsigned long saved_pcpu_base;
struct pt_regs saved_regs;
void save_processor_state(void)
@@ -20,7 +20,7 @@ void save_processor_state(void)
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
- saved_pcpu_base = csr_read64(PERCPU_BASE_KS);
+ saved_pcpu_base = csr_read(PERCPU_BASE_KS);
if (is_fpu_owner())
save_fp(current);
@@ -33,7 +33,7 @@ void restore_processor_state(void)
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
- csr_write64(saved_pcpu_base, PERCPU_BASE_KS);
+ csr_write(saved_pcpu_base, PERCPU_BASE_KS);
if (is_fpu_owner())
restore_fp(current);
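
hibernate.c drops its 64-bit assumptions the same way: the per-CPU base is held in an unsigned long and accessed through csr_read()/csr_write(), which select the native CSR width. A plausible, hypothetical shape for that dispatch, assuming the usual csr_read32/csr_read64 primitives (the real helpers may differ):

#ifdef CONFIG_64BIT
#define csr_read(reg)		csr_read64(reg)
#define csr_write(val, reg)	csr_write64(val, reg)
#else
#define csr_read(reg)		csr_read32(reg)
#define csr_write(val, reg)	csr_write32(val, reg)
#endif

The same substitution recurs in platform.c and suspend.c below.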
diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
index 5bbdb9fd76e5..faa4fe4e7461 100644
--- a/arch/loongarch/power/platform.c
+++ b/arch/loongarch/power/platform.c
@@ -72,10 +72,10 @@ static int __init loongson3_acpi_suspend_init(void)
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
pr_info("ACPI S3 supported with hardware register default\n");
- loongson_sysconf.suspend_addr = (u64)default_suspend_addr;
+ loongson_sysconf.suspend_addr = (unsigned long)default_suspend_addr;
} else {
pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n");
- loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
+ loongson_sysconf.suspend_addr = (unsigned long)phys_to_virt(PHYSADDR(suspend_addr));
}
#endif
return 0;
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
index c9e594925c47..7e3d79f8bbd4 100644
--- a/arch/loongarch/power/suspend.c
+++ b/arch/loongarch/power/suspend.c
@@ -20,24 +20,24 @@ u64 loongarch_suspend_addr;
struct saved_registers {
u32 ecfg;
u32 euen;
- u64 pgd;
- u64 kpgd;
u32 pwctl0;
u32 pwctl1;
- u64 pcpu_base;
+ unsigned long pgd;
+ unsigned long kpgd;
+ unsigned long pcpu_base;
};
static struct saved_registers saved_regs;
void loongarch_common_suspend(void)
{
save_counter();
- saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
- saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
+ saved_regs.pgd = csr_read(LOONGARCH_CSR_PGDL);
+ saved_regs.kpgd = csr_read(LOONGARCH_CSR_PGDH);
saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
- saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
+ saved_regs.pcpu_base = csr_read(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@@ -46,17 +46,17 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
- csr_write64(eentry, LOONGARCH_CSR_EENTRY);
- csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
- csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+ csr_write(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write(eentry, LOONGARCH_CSR_MERRENTRY);
+ csr_write(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
- csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
- csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
+ csr_write(saved_regs.pgd, LOONGARCH_CSR_PGDL);
+ csr_write(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
- csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
+ csr_write(saved_regs.pcpu_base, PERCPU_BASE_KS);
}
int loongarch_acpi_suspend(void)
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index df0865df26fa..c8119ad5fb2c 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -14,41 +14,41 @@
/* preparatory stuff */
.macro SETUP_SLEEP
- addi.d sp, sp, -PT_SIZE
- st.d $r1, sp, PT_R1
- st.d $r2, sp, PT_R2
- st.d $r3, sp, PT_R3
- st.d $r4, sp, PT_R4
- st.d $r21, sp, PT_R21
- st.d $r22, sp, PT_R22
- st.d $r23, sp, PT_R23
- st.d $r24, sp, PT_R24
- st.d $r25, sp, PT_R25
- st.d $r26, sp, PT_R26
- st.d $r27, sp, PT_R27
- st.d $r28, sp, PT_R28
- st.d $r29, sp, PT_R29
- st.d $r30, sp, PT_R30
- st.d $r31, sp, PT_R31
+ PTR_ADDI sp, sp, -PT_SIZE
+ REG_S $r1, sp, PT_R1
+ REG_S $r2, sp, PT_R2
+ REG_S $r3, sp, PT_R3
+ REG_S $r4, sp, PT_R4
+ REG_S $r21, sp, PT_R21
+ REG_S $r22, sp, PT_R22
+ REG_S $r23, sp, PT_R23
+ REG_S $r24, sp, PT_R24
+ REG_S $r25, sp, PT_R25
+ REG_S $r26, sp, PT_R26
+ REG_S $r27, sp, PT_R27
+ REG_S $r28, sp, PT_R28
+ REG_S $r29, sp, PT_R29
+ REG_S $r30, sp, PT_R30
+ REG_S $r31, sp, PT_R31
.endm
.macro SETUP_WAKEUP
- ld.d $r1, sp, PT_R1
- ld.d $r2, sp, PT_R2
- ld.d $r3, sp, PT_R3
- ld.d $r4, sp, PT_R4
- ld.d $r21, sp, PT_R21
- ld.d $r22, sp, PT_R22
- ld.d $r23, sp, PT_R23
- ld.d $r24, sp, PT_R24
- ld.d $r25, sp, PT_R25
- ld.d $r26, sp, PT_R26
- ld.d $r27, sp, PT_R27
- ld.d $r28, sp, PT_R28
- ld.d $r29, sp, PT_R29
- ld.d $r30, sp, PT_R30
- ld.d $r31, sp, PT_R31
- addi.d sp, sp, PT_SIZE
+ REG_L $r1, sp, PT_R1
+ REG_L $r2, sp, PT_R2
+ REG_L $r3, sp, PT_R3
+ REG_L $r4, sp, PT_R4
+ REG_L $r21, sp, PT_R21
+ REG_L $r22, sp, PT_R22
+ REG_L $r23, sp, PT_R23
+ REG_L $r24, sp, PT_R24
+ REG_L $r25, sp, PT_R25
+ REG_L $r26, sp, PT_R26
+ REG_L $r27, sp, PT_R27
+ REG_L $r28, sp, PT_R28
+ REG_L $r29, sp, PT_R29
+ REG_L $r30, sp, PT_R30
+ REG_L $r31, sp, PT_R31
+ PTR_ADDI sp, sp, PT_SIZE
.endm
.text
@@ -59,15 +59,15 @@ SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
la.pcrel t0, acpi_saved_sp
- st.d sp, t0, 0
+ REG_S sp, t0, 0
bl __flush_cache_all
/* Pass RA and SP to BIOS */
- addi.d a1, sp, 0
+ PTR_ADDI a1, sp, 0
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
- ld.d t0, t0, 0
+ REG_L t0, t0, 0
jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
@@ -83,7 +83,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
csrwr t0, LOONGARCH_CSR_CRMD
la.pcrel t0, acpi_saved_sp
- ld.d sp, t0, 0
+ REG_L sp, t0, 0
SETUP_WAKEUP
jr ra
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index c0cc3ca5da9f..520f1513f07d 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -4,8 +4,9 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile.include
-obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
+obj-vdso-y := elf.o vgetcpu.o vgetrandom.o \
vgetrandom-chacha.o sigreturn.o
+obj-vdso-$(CONFIG_GENERIC_GETTIMEOFDAY) += vgettimeofday.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@@ -16,6 +17,10 @@ ccflags-vdso := \
$(CLANG_FLAGS) \
-D__VDSO__
+ifdef CONFIG_32BIT
+ccflags-vdso += -DBUILD_VDSO32
+endif
+
cflags-vdso := $(ccflags-vdso) \
-isystem $(shell $(CC) -print-file-name=include) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 8ff986499947..ac537e02beb1 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -7,8 +7,6 @@
#include <generated/asm-offsets.h>
#include <vdso/datapage.h>
-OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
-
OUTPUT_ARCH(loongarch)
SECTIONS
@@ -63,9 +61,11 @@ VERSION
LINUX_5.10 {
global:
__vdso_getcpu;
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
+#endif
__vdso_getrandom;
__vdso_rt_sigreturn;
local: *;
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 5301cd9d0f83..73af49242ecd 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -10,11 +10,19 @@ static __always_inline int read_cpu_id(void)
{
int cpu_id;
+#ifdef CONFIG_64BIT
__asm__ __volatile__(
" rdtime.d $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
+#else
+ __asm__ __volatile__(
+ " rdtimel.w $zero, %0\n"
+ : "=r" (cpu_id)
+ :
+ : "memory");
+#endif
return cpu_id;
}
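
On LoongArch, rdtime.d (rdtimel.w on 32-bit) returns the stable counter value in its first operand and the counter ID in its second; the platform programs the counter ID to match the CPU number, so writing the value to $zero keeps only the ID. A simplified sketch of the consumer, assuming read_cpu_id() from the hunk above — the in-tree __vdso_getcpu() also fills *node from vDSO data, omitted here:

int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
	int cpu_id = read_cpu_id();

	if (cpu)
		*cpu = cpu_id;

	return 0;
}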
diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig
index fda9971bdd8d..adb9fd62ddb0 100644
--- a/arch/mips/configs/gcw0_defconfig
+++ b/arch/mips/configs/gcw0_defconfig
@@ -79,7 +79,6 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_PROC_FS is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_SPI is not set
diff --git a/arch/mips/configs/loongson1_defconfig b/arch/mips/configs/loongson1_defconfig
index 02d29110f702..1d9781ff9698 100644
--- a/arch/mips/configs/loongson1_defconfig
+++ b/arch/mips/configs/loongson1_defconfig
@@ -119,7 +119,6 @@ CONFIG_WATCHDOG_SYSFS=y
CONFIG_LOONGSON1_WDT=y
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_MIPS is not set
# CONFIG_SND_USB is not set
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index 5f5b0254d75e..a1bb0792f6eb 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -81,7 +81,6 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_SPI is not set
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 03a7bbe28a53..49c709d663be 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -53,7 +53,6 @@ CONFIG_TXX9_WDT=m
# CONFIG_VGA_ARB is not set
CONFIG_SOUND=m
CONFIG_SND=m
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_PCI is not set
diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig
index a53dd66e9b86..8382d535e6dc 100644
--- a/arch/mips/configs/rs90_defconfig
+++ b/arch/mips/configs/rs90_defconfig
@@ -105,7 +105,6 @@ CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_PCM_TIMER is not set
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_PROC_FS is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_MIPS is not set
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
index 8aff83217397..2b19c20a9a2c 100644
--- a/arch/powerpc/configs/85xx-hw.config
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -117,7 +117,6 @@ CONFIG_SND_INTEL8X0=y
CONFIG_SND_POWERPC_SOC=y
# CONFIG_SND_PPC is not set
CONFIG_SND_SOC=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_USB is not set
CONFIG_SND=y
CONFIG_SOUND=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index e7bd265fae5a..07f30ab881e5 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -80,7 +80,6 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_SND_INTEL8X0=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND=y
CONFIG_SOUND=y
CONFIG_ULI526X=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index c0fe5e76604a..617650cea56a 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -75,7 +75,6 @@ CONFIG_FB_SM501=m
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_PCI is not set
# CONFIG_SND_PPC is not set
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index b082c1fae13c..787d707f64a4 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -726,7 +726,6 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_DYNAMIC_MINORS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
CONFIG_SND_DEBUG_VERBOSE=y
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 502133979e22..4cbbe2ee58ab 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -532,6 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_insert(b_dev_info, newpage);
+ __count_vm_event(BALLOON_MIGRATE);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
static void cmm_balloon_compaction_init(void)
{
- balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
@@ -572,6 +572,7 @@ static int cmm_init(void)
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
+ balloon_devinfo_init(&b_dev_info);
cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb);
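
Two separate fixes land here: the migration path now counts BALLOON_MIGRATE in vmstat like the other balloon drivers, and balloon_devinfo_init() moves out of the CONFIG_BALLOON_COMPACTION-only helper so the balloon page list and lock are initialized in every configuration. The resulting order in cmm_init(), sketched from the hunks above with the tail elided:

static int cmm_init(void)
{
	if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
		return -EOPNOTSUPP;

	balloon_devinfo_init(&b_dev_info);	/* now runs in every config */
	cmm_balloon_compaction_init();		/* only installs ->migratepage */

	/* oom notifier registration and the rest are unchanged */
	return 0;
}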
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index a75d6325607b..14c5acb935e9 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -4,7 +4,8 @@ menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
config CRYPTO_AES_RISCV64
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
@@ -20,7 +21,8 @@ config CRYPTO_AES_RISCV64
config CRYPTO_GHASH_RISCV64
tristate "Hash functions: GHASH"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
select CRYPTO_GCM
help
GCM GHASH function (NIST SP 800-38D)
@@ -30,7 +32,8 @@ config CRYPTO_GHASH_RISCV64
config CRYPTO_SM3_RISCV64
tristate "Hash functions: SM3 (ShangMi 3)"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
select CRYPTO_HASH
select CRYPTO_LIB_SM3
help
@@ -42,7 +45,8 @@ config CRYPTO_SM3_RISCV64
config CRYPTO_SM4_RISCV64
tristate "Ciphers: SM4 (ShangMi 4)"
- depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+ RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
select CRYPTO_ALGAPI
select CRYPTO_SM4
help
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index abeae220606a..905fac107284 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -79,7 +79,6 @@ CONFIG_FB_TILEBLITTING=y
CONFIG_FB_SH_MOBILE_LCDC=m
CONFIG_SOUND=y
CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_SOC=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 9e3a54936f76..8ca46d704c8b 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -83,7 +83,6 @@ CONFIG_LOGO=y
# CONFIG_LOGO_SUPERH_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=m
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_SPI is not set
# CONFIG_SND_SUPERH is not set
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index eb63aa61b046..5468cc53cddb 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -93,7 +93,6 @@ CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_HRTIMER=y
CONFIG_SND_DYNAMIC_MINORS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e8b6af199c73..9293ce50574d 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int ret = amd_uncore_event_init(event);
- if (ret || pmu_version < 2)
- return ret;
-
hwc->config = event->attr.config &
(pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
AMD64_RAW_EVENT_MASK_NB);
- return 0;
+ return ret;
}
static int amd_uncore_df_add(struct perf_event *event, int flags)
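
The uncore fix reorders amd_uncore_df_event_init(): the old early `return ret`, taken whenever init failed or pmu_version < 2, bypassed the hwc->config assignment entirely. Reconstructed from the hunks, the function now always applies the version-appropriate raw event mask and still propagates any init error:

static int amd_uncore_df_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret = amd_uncore_event_init(event);

	/* Mask the raw config even on PMU v1; keep the init result. */
	hwc->config = event->attr.config &
		      (pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
					  AMD64_RAW_EVENT_MASK_NB);

	return ret;
}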
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 853fe073bab3..bdf3f0d0fe21 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
if (!test_bit(bit, cpuc->active_mask))
continue;
+ /* Event may have already been cleared: */
+ if (!event)
+ continue;
/*
* There may be unprocessed PEBS records in the PEBS buffer,
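
In handle_pmi_common(), a counter bit can remain set in the PMI status word after its event has already been torn down, and dereferencing the stale slot would crash before the PEBS drain runs. A sketch of the surrounding loop — the context is assumed, only the null check comes from this patch:

for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
	struct perf_event *event = cpuc->events[bit];

	if (!test_bit(bit, cpuc->active_mask))
		continue;

	/* Event may have already been cleared: */
	if (!event)
		continue;

	/* ... drain PEBS records and handle the overflow for event ... */
}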
diff --git a/arch/x86/hyperv/.gitignore b/arch/x86/hyperv/.gitignore
new file mode 100644
index 000000000000..333615d993b5
--- /dev/null
+++ b/arch/x86/hyperv/.gitignore
@@ -0,0 +1 @@
+mshv-asm-offsets.h